spark-based commitments to R1CS matrices (#152)

* spark-based commitments to R1CS matrices
* small fixes
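For readers skimming the diff below: the user-facing change is that `RelaxedR1CSSNARK` now takes a third type parameter selecting a computation-commitment engine, and `CompressedSNARK::setup` returns a `Result`. A minimal sketch of the updated call sites, lifted from the benches and tests in this commit (type paths as used there; not compilable outside the crate):

```rust
// Sketch mirroring benches/ and the tests in src/lib.rs from this commit.
type G1 = pasta_curves::pallas::Point;
type G2 = pasta_curves::vesta::Point;
type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine<G2>;
// the trivial engine lets the verifier evaluate the R1CS matrices itself ...
type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine<G2, EE2>;
// ... or swap in nova_snark::spartan::spark::SparkEngine for the SPARK-compressed variant
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;

// setup is now fallible, so call sites unwrap (or propagate) the Result:
// let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap();
```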
@@ -1,6 +1,6 @@
[package]
name = "nova-snark"
version = "0.18.2"
version = "0.19.0"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2021"
description = "Recursive zkSNARKs without trusted setup"
@@ -17,8 +17,10 @@ type G1 = pasta_curves::pallas::Point;
type G2 = pasta_curves::vesta::Point;
type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine<G2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2>;
type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
type C1 = NonTrivialTestCircuit<<G1 as Group>::Scalar>;
type C2 = TrivialTestCircuit<<G2 as Group>::Scalar>;
@@ -50,7 +52,7 @@ fn bench_compressed_snark(c: &mut Criterion) {
);
// Produce prover and verifier keys for CompressedSNARK
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp);
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap();
// produce a recursive SNARK
let num_steps = 3;
@@ -256,13 +256,15 @@ fn main() {
// produce a compressed SNARK
println!("Generating a CompressedSNARK using Spartan with IPA-PC...");
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp);
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap();
let start = Instant::now();
type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine<G2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2>;
type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark);
println!(
@@ -46,8 +46,7 @@ mod tests {
// First create the shape
let mut cs: ShapeCS<G> = ShapeCS::new();
let _ = synthesize_alloc_bit(&mut cs);
let shape = cs.r1cs_shape();
let ck = cs.commitment_key();
let (shape, ck) = cs.r1cs_shape();
// Now get the assignment
let mut cs: SatisfyingAssignment<G> = SatisfyingAssignment::new();
@@ -22,12 +22,10 @@ pub trait NovaWitness<G: Group> {
) -> Result<(R1CSInstance<G>, R1CSWitness<G>), NovaError>;
}
/// `NovaShape` provides methods for acquiring `R1CSShape` and `R1CSGens` from implementers.
/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` from implementers.
pub trait NovaShape<G: Group> {
/// Return an appropriate `R1CSShape` struct.
fn r1cs_shape(&self) -> R1CSShape<G>;
/// Return an appropriate `CommitmentKey` struct.
fn commitment_key(&self) -> CommitmentKey<G>;
/// Return an appropriate `R1CSShape` and `CommitmentKey` structs.
fn r1cs_shape(&self) -> (R1CSShape<G>, CommitmentKey<G>);
}
impl<G: Group> NovaWitness<G> for SatisfyingAssignment<G>
@@ -54,7 +52,7 @@ impl<G: Group> NovaShape<G> for ShapeCS<G>
where
G::Scalar: PrimeField,
{
fn r1cs_shape(&self) -> R1CSShape<G> {
fn r1cs_shape(&self) -> (R1CSShape<G>, CommitmentKey<G>) {
let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new();
let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new();
let mut C: Vec<(usize, usize, G::Scalar)> = Vec::new();
@@ -84,11 +82,9 @@ where
res.unwrap()
};
S
}
let ck = R1CS::<G>::commitment_key(&S);
fn commitment_key(&self) -> CommitmentKey<G> {
R1CS::<G>::commitment_key(self.num_constraints(), self.num_aux())
(S, ck)
}
}
@@ -400,7 +400,7 @@ mod tests {
);
let mut cs: ShapeCS<G1> = ShapeCS::new();
let _ = circuit1.synthesize(&mut cs);
let (shape1, ck1) = (cs.r1cs_shape(), cs.commitment_key());
let (shape1, ck1) = cs.r1cs_shape();
assert_eq!(cs.num_constraints(), 9815);
// Initialize the shape and ck for the secondary
@@ -413,7 +413,7 @@ mod tests {
);
let mut cs: ShapeCS<G2> = ShapeCS::new();
let _ = circuit2.synthesize(&mut cs);
let (shape2, ck2) = (cs.r1cs_shape(), cs.commitment_key());
let (shape2, ck2) = cs.r1cs_shape();
assert_eq!(cs.num_constraints(), 10347);
// Execute the base case for the primary
@@ -44,4 +44,10 @@ pub enum NovaError {
/// returned when the transcript engine encounters an overflow of the round number
#[error("InternalTranscriptError")]
InternalTranscriptError,
/// returned when the multiset check fails
#[error("InvalidMultisetProof")]
InvalidMultisetProof,
/// returned when the product proof check fails
#[error("InvalidProductProof")]
InvalidProductProof,
}
@@ -975,8 +975,7 @@ mod tests {
let mut cs: ShapeCS<G2> = ShapeCS::new();
let _ = synthesize_smul::<G1, _>(cs.namespace(|| "synthesize"));
println!("Number of constraints: {}", cs.num_constraints());
let shape = cs.r1cs_shape();
let ck = cs.commitment_key();
let (shape, ck) = cs.r1cs_shape();
// Then the satisfying assignment
let mut cs: SatisfyingAssignment<G2> = SatisfyingAssignment::new();
@@ -1017,8 +1016,7 @@ mod tests {
let mut cs: ShapeCS<G2> = ShapeCS::new();
let _ = synthesize_add_equal::<G1, _>(cs.namespace(|| "synthesize add equal"));
println!("Number of constraints: {}", cs.num_constraints());
let shape = cs.r1cs_shape();
let ck = cs.commitment_key();
let (shape, ck) = cs.r1cs_shape();
// Then the satisfying assignment
let mut cs: SatisfyingAssignment<G2> = SatisfyingAssignment::new();
@@ -1063,8 +1061,7 @@ mod tests {
let mut cs: ShapeCS<G2> = ShapeCS::new();
let _ = synthesize_add_negation::<G1, _>(cs.namespace(|| "synthesize add equal"));
println!("Number of constraints: {}", cs.num_constraints());
let shape = cs.r1cs_shape();
let ck = cs.commitment_key();
let (shape, ck) = cs.r1cs_shape();
// Then the satisfying assignment
let mut cs: SatisfyingAssignment<G2> = SatisfyingAssignment::new();
116 src/lib.rs
@@ -106,7 +106,7 @@ where
);
let mut cs: ShapeCS<G1> = ShapeCS::new();
let _ = circuit_primary.synthesize(&mut cs);
let (r1cs_shape_primary, ck_primary) = (cs.r1cs_shape(), cs.commitment_key());
let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape();
// Initialize ck for the secondary
let circuit_secondary: NovaAugmentedCircuit<G1, C2> = NovaAugmentedCircuit::new(
@@ -117,7 +117,7 @@ where
);
let mut cs: ShapeCS<G2> = ShapeCS::new();
let _ = circuit_secondary.synthesize(&mut cs);
let (r1cs_shape_secondary, ck_secondary) = (cs.r1cs_shape(), cs.commitment_key());
let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape();
Self {
F_arity_primary,
@@ -580,12 +580,15 @@ where
/// Creates prover and verifier keys for `CompressedSNARK`
pub fn setup(
pp: &PublicParams<G1, G2, C1, C2>,
) -> (
ProverKey<G1, G2, C1, C2, S1, S2>,
VerifierKey<G1, G2, C1, C2, S1, S2>,
) {
let (pk_primary, vk_primary) = S1::setup(&pp.ck_primary, &pp.r1cs_shape_primary);
let (pk_secondary, vk_secondary) = S2::setup(&pp.ck_secondary, &pp.r1cs_shape_secondary);
) -> Result<
(
ProverKey<G1, G2, C1, C2, S1, S2>,
VerifierKey<G1, G2, C1, C2, S1, S2>,
),
NovaError,
> {
let (pk_primary, vk_primary) = S1::setup(&pp.ck_primary, &pp.r1cs_shape_primary)?;
let (pk_secondary, vk_secondary) = S2::setup(&pp.ck_secondary, &pp.r1cs_shape_secondary)?;
let pk = ProverKey {
pk_primary,
@@ -607,7 +610,7 @@ where
_p_c2: Default::default(),
};
(pk, vk)
Ok((pk, vk))
}
/// Create a new `CompressedSNARK`
@@ -785,8 +788,10 @@ mod tests {
type G2 = pasta_curves::vesta::Point;
type EE1 = provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = provider::ipa_pc::EvaluationEngine<G2>;
type S1 = spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = spartan::RelaxedR1CSSNARK<G2, EE2>;
type CC1 = spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};
use core::marker::PhantomData;
use ff::PrimeField;
@@ -1011,7 +1016,7 @@ mod tests {
assert_eq!(zn_secondary, vec![<G2 as Group>::Scalar::from(2460515u64)]);
// produce the prover and verifier keys for compressed snark
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp);
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap();
// produce a compressed SNARK
let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark);
@@ -1028,6 +1033,91 @@ mod tests {
assert!(res.is_ok());
}
#[test]
fn test_ivc_nontrivial_with_spark_compression() {
let circuit_primary = TrivialTestCircuit::default();
let circuit_secondary = CubicCircuit::default();
// produce public parameters
let pp = PublicParams::<
G1,
G2,
TrivialTestCircuit<<G1 as Group>::Scalar>,
CubicCircuit<<G2 as Group>::Scalar>,
>::setup(circuit_primary.clone(), circuit_secondary.clone());
let num_steps = 3;
// produce a recursive SNARK
let mut recursive_snark: Option<
RecursiveSNARK<
G1,
G2,
TrivialTestCircuit<<G1 as Group>::Scalar>,
CubicCircuit<<G2 as Group>::Scalar>,
>,
> = None;
for _i in 0..num_steps {
let res = RecursiveSNARK::prove_step(
&pp,
recursive_snark,
circuit_primary.clone(),
circuit_secondary.clone(),
vec![<G1 as Group>::Scalar::one()],
vec![<G2 as Group>::Scalar::zero()],
);
assert!(res.is_ok());
recursive_snark = Some(res.unwrap());
}
assert!(recursive_snark.is_some());
let recursive_snark = recursive_snark.unwrap();
// verify the recursive SNARK
let res = recursive_snark.verify(
&pp,
num_steps,
vec![<G1 as Group>::Scalar::one()],
vec![<G2 as Group>::Scalar::zero()],
);
assert!(res.is_ok());
let (zn_primary, zn_secondary) = res.unwrap();
// sanity: check the claimed output with a direct computation of the same
assert_eq!(zn_primary, vec![<G1 as Group>::Scalar::one()]);
let mut zn_secondary_direct = vec![<G2 as Group>::Scalar::zero()];
for _i in 0..num_steps {
zn_secondary_direct = CubicCircuit::default().output(&zn_secondary_direct);
}
assert_eq!(zn_secondary, zn_secondary_direct);
assert_eq!(zn_secondary, vec![<G2 as Group>::Scalar::from(2460515u64)]);
// run the compressed snark with Spark compiler
type CC1Prime = spartan::spark::SparkEngine<G1, EE1>;
type CC2Prime = spartan::spark::SparkEngine<G2, EE2>;
type S1Prime = spartan::RelaxedR1CSSNARK<G1, EE1, CC1Prime>;
type S2Prime = spartan::RelaxedR1CSSNARK<G2, EE2, CC2Prime>;
// produce the prover and verifier keys for compressed snark
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1Prime, S2Prime>::setup(&pp).unwrap();
// produce a compressed SNARK
let res = CompressedSNARK::<_, _, _, _, S1Prime, S2Prime>::prove(&pp, &pk, &recursive_snark);
assert!(res.is_ok());
let compressed_snark = res.unwrap();
// verify the compressed SNARK
let res = compressed_snark.verify(
&vk,
num_steps,
vec![<G1 as Group>::Scalar::one()],
vec![<G2 as Group>::Scalar::zero()],
);
assert!(res.is_ok());
}
#[test]
fn test_ivc_nondet_with_compression() {
// y is a non-deterministic advice representing the fifth root of the input at a step.
@@ -1162,7 +1252,7 @@ mod tests {
assert!(res.is_ok());
// produce the prover and verifier keys for compressed snark
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp);
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap();
// produce a compressed SNARK
let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark);
@@ -171,8 +171,7 @@ mod tests {
// First create the shape
let mut cs: ShapeCS<G> = ShapeCS::new();
let _ = synthesize_tiny_r1cs_bellperson(&mut cs, None);
let shape = cs.r1cs_shape();
let ck = cs.commitment_key();
let (shape, ck) = cs.r1cs_shape();
let ro_consts =
<<G as Group>::RO as ROTrait<<G as Group>::Base, <G as Group>::Scalar>>::Constants::new();
@@ -305,7 +304,7 @@ mod tests {
};
// generate generators and ro constants
let ck = R1CS::<G>::commitment_key(num_cons, num_vars);
let ck = R1CS::<G>::commitment_key(&S);
let ro_consts =
<<G as Group>::RO as ROTrait<<G as Group>::Base, <G as Group>::Scalar>>::Constants::new();
@@ -190,6 +190,8 @@ where
) -> Result<Self, NovaError> {
transcript.dom_sep(Self::protocol_name());
let (ck, _) = ck.split_at(U.b_vec.len());
if U.b_vec.len() != W.a_vec.len() {
return Err(NovaError::InvalidInputLength);
}
@@ -272,7 +274,7 @@ where
// we create mutable copies of vectors and generators
let mut a_vec = W.a_vec.to_vec();
let mut b_vec = U.b_vec.to_vec();
let mut ck = ck.clone();
let mut ck = ck;
for _i in 0..(U.b_vec.len() as f64).log2() as usize {
let (L, R, a_vec_folded, b_vec_folded, ck_folded) =
prove_inner(&a_vec, &b_vec, &ck, transcript)?;
@@ -300,6 +302,8 @@ where
U: &InnerProductInstance<G>,
transcript: &mut G::TE,
) -> Result<(), NovaError> {
let (ck, _) = ck.split_at(U.b_vec.len());
transcript.dom_sep(Self::protocol_name());
if U.b_vec.len() != n
|| n != (1 << self.L_vec.len())
@@ -383,7 +387,7 @@ where
};
let ck_hat = {
let c = CE::<G>::commit(ck, &s).compress();
let c = CE::<G>::commit(&ck, &s).compress();
CommitmentKey::<G>::reinterpret_commitments_as_ck(&[c])?
};
@@ -72,8 +72,11 @@ pub struct RelaxedR1CSInstance<G: Group> {
impl<G: Group> R1CS<G> {
/// Samples public parameters for the specified number of constraints and variables in an R1CS
pub fn commitment_key(num_cons: usize, num_vars: usize) -> CommitmentKey<G> {
G::CE::setup(b"ck", max(num_vars, num_cons))
pub fn commitment_key(S: &R1CSShape<G>) -> CommitmentKey<G> {
let num_cons = S.num_cons;
let num_vars = S.num_vars;
let num_nz = max(max(S.A.len(), S.B.len()), S.C.len());
G::CE::setup(b"ck", max(max(num_cons, num_vars), num_nz))
}
}
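The commitment key is now sized from the shape rather than from `(num_cons, num_vars)` alone, since the Spark engine (see `src/spartan/spark/sparse.rs` below) commits to vectors with one entry per non-zero matrix element, and the key must cover those too. A small standalone sketch of the sizing rule above (hypothetical helper name, toy non-zero counts):

```rust
// Hypothetical restatement of the generator-count rule used above.
fn ck_len(num_cons: usize, num_vars: usize, nnz: [usize; 3]) -> usize {
    // nnz holds the number of non-zero entries of A, B, and C (toy values below)
    let num_nz = nnz.iter().copied().max().unwrap();
    num_cons.max(num_vars).max(num_nz)
}

fn main() {
    // if the largest matrix has more non-zero entries than there are rows or columns,
    // it now determines the key length
    assert_eq!(ck_len(10_347, 10_329, [29_000, 28_500, 27_000]), 29_000);
}
```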
30 src/spartan/math.rs Normal file
@@ -0,0 +1,30 @@
pub trait Math {
fn pow2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
fn log_2(self) -> usize;
}
impl Math for usize {
#[inline]
fn pow2(self) -> usize {
let base: usize = 2;
base.pow(self as u32)
}
/// Returns the num_bits from n in a canonical order
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)
.map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
}
fn log_2(self) -> usize {
assert_ne!(self, 0);
if self.is_power_of_two() {
(1usize.leading_zeros() - self.leading_zeros()) as usize
} else {
(0usize.leading_zeros() - self.leading_zeros()) as usize
}
}
}
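A quick restatement of the helpers above, with spot checks (standalone illustration, not part of the commit): `pow2(n)` is 2^n, `get_bits` returns the low `num_bits` bits most-significant first, and `log_2` is exact for powers of two and rounds up otherwise.

```rust
// Standalone copy of the same logic, with a few sanity checks.
fn pow2(n: usize) -> usize {
    2usize.pow(n as u32)
}

fn log_2(n: usize) -> usize {
    assert_ne!(n, 0);
    if n.is_power_of_two() {
        (1usize.leading_zeros() - n.leading_zeros()) as usize // exact log2
    } else {
        (0usize.leading_zeros() - n.leading_zeros()) as usize // rounds up to the next power of two
    }
}

fn main() {
    assert_eq!(pow2(4), 16);
    assert_eq!(log_2(16), 4); // exact for powers of two
    assert_eq!(log_2(9), 4); // 9 is not a power of two; 2^4 = 16 covers it
}
```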
@@ -1,6 +1,8 @@
//! This module implements RelaxedR1CSSNARKTrait using Spartan that is generic
//! over the polynomial commitment and evaluation argument (i.e., a PCS)
pub mod polynomial;
mod math;
pub(crate) mod polynomial;
pub mod spark;
mod sumcheck;
use crate::{
@@ -8,6 +10,7 @@ use crate::{
r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
traits::{
evaluation::EvaluationEngineTrait, snark::RelaxedR1CSSNARKTrait, Group, TranscriptEngineTrait,
TranscriptReprTrait,
},
CommitmentKey,
};
@@ -18,20 +21,75 @@ use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use sumcheck::SumcheckProof;
/// A trait that defines the behavior of a computation commitment engine
pub trait CompCommitmentEngineTrait<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
/// A type that holds opening hint
type Decommitment: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>;
/// A type that holds a commitment
type Commitment: Clone
+ Send
+ Sync
+ TranscriptReprTrait<G>
+ Serialize
+ for<'de> Deserialize<'de>;
/// A type that holds an evaluation argument
type EvaluationArgument: Send + Sync + Serialize + for<'de> Deserialize<'de>;
/// commits to R1CS matrices
fn commit(
ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
) -> Result<(Self::Commitment, Self::Decommitment), NovaError>;
/// proves an evaluation of R1CS matrices viewed as polynomials
fn prove(
ck: &CommitmentKey<G>,
ek: &EE::ProverKey,
S: &R1CSShape<G>,
decomm: &Self::Decommitment,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
transcript: &mut G::TE,
) -> Result<Self::EvaluationArgument, NovaError>;
/// verifies an evaluation of R1CS matrices viewed as polynomials and returns verified evaluations
fn verify(
vk: &EE::VerifierKey,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
arg: &Self::EvaluationArgument,
transcript: &mut G::TE,
) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError>;
}
/// A type that represents the prover's key
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProverKey<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
pub struct ProverKey<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G, EE>,
> {
pk_ee: EE::ProverKey,
S: R1CSShape<G>,
decomm: CC::Decommitment,
comm: CC::Commitment,
}
/// A type that represents the verifier's key
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct VerifierKey<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
pub struct VerifierKey<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G, EE>,
> {
num_cons: usize,
num_vars: usize,
vk_ee: EE::VerifierKey,
S: R1CSShape<G>,
comm: CC::Commitment,
}
/// A succinct proof of knowledge of a witness to a relaxed R1CS instance
@@ -39,7 +97,11 @@ pub struct VerifierKey<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
/// the commitment to a vector viewed as a polynomial commitment
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct RelaxedR1CSSNARK<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
pub struct RelaxedR1CSSNARK<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G, EE>,
> {
sc_proof_outer: SumcheckProof<G>,
claims_outer: (G::Scalar, G::Scalar, G::Scalar),
eval_E: G::Scalar,
@@ -49,20 +111,40 @@ pub struct RelaxedR1CSSNARK<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>>
eval_E_prime: G::Scalar,
eval_W_prime: G::Scalar,
eval_arg: EE::EvaluationArgument,
eval_arg_cc: CC::EvaluationArgument,
}
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G>
for RelaxedR1CSSNARK<G, EE>
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>, CC: CompCommitmentEngineTrait<G, EE>>
RelaxedR1CSSNARKTrait<G> for RelaxedR1CSSNARK<G, EE, CC>
{
type ProverKey = ProverKey<G, EE>;
type VerifierKey = VerifierKey<G, EE>;
type ProverKey = ProverKey<G, EE, CC>;
type VerifierKey = VerifierKey<G, EE, CC>;
fn setup(ck: &CommitmentKey<G>, S: &R1CSShape<G>) -> (Self::ProverKey, Self::VerifierKey) {
fn setup(
ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> {
let (pk_ee, vk_ee) = EE::setup(ck);
let pk = ProverKey { pk_ee, S: S.pad() };
let vk = VerifierKey { vk_ee, S: S.pad() };
(pk, vk)
let S = S.pad();
let (comm, decomm) = CC::commit(ck, &S)?;
let vk = VerifierKey {
num_cons: S.num_cons,
num_vars: S.num_vars,
vk_ee,
comm: comm.clone(),
};
let pk = ProverKey {
pk_ee,
S,
comm,
decomm,
};
Ok((pk, vk))
}
/// produces a succinct proof of satisfiability of a RelaxedR1CS instance
@@ -81,8 +163,8 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
assert_eq!(pk.S.num_io.next_power_of_two(), pk.S.num_io);
assert!(pk.S.num_io < pk.S.num_vars);
// append the R1CSShape and RelaxedR1CSInstance to the transcript
transcript.absorb(b"S", &pk.S);
// append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"C", &pk.comm);
transcript.absorb(b"U", U);
// compute the full satisfying assignment by concatenating W.W, U.u, and U.X
@@ -209,6 +291,17 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
&mut transcript,
)?;
// we now prove evaluations of R1CS matrices at (r_x, r_y)
let eval_arg_cc = CC::prove(
ck,
&pk.pk_ee,
&pk.S,
&pk.decomm,
&pk.comm,
&(&r_x, &r_y),
&mut transcript,
)?;
let eval_W = MultilinearPolynomial::new(W.W.clone()).evaluate(&r_y[1..]);
transcript.absorb(b"eval_W", &eval_W);
@@ -231,7 +324,7 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
let (sc_proof_batch, r_z, claims_batch) = SumcheckProof::prove_quad_sum(
&claim_batch_joint,
num_rounds_z,
&mut MultilinearPolynomial::new(EqPolynomial::new(r_x).evals()),
&mut MultilinearPolynomial::new(EqPolynomial::new(r_x.clone()).evals()),
&mut MultilinearPolynomial::new(W.E.clone()),
&mut MultilinearPolynomial::new(EqPolynomial::new(r_y[1..].to_vec()).evals()),
&mut MultilinearPolynomial::new(W.W.clone()),
@@ -266,6 +359,7 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
eval_E_prime,
eval_W_prime,
eval_arg,
eval_arg_cc,
})
}
@@ -273,13 +367,13 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<G>) -> Result<(), NovaError> {
let mut transcript = G::TE::new(b"RelaxedR1CSSNARK");
// append the R1CSShape and RelaxedR1CSInstance to the transcript
transcript.absorb(b"S", &vk.S);
// append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"C", &vk.comm);
transcript.absorb(b"U", U);
let (num_rounds_x, num_rounds_y) = (
(vk.S.num_cons as f64).log2() as usize,
((vk.S.num_vars as f64).log2() as usize + 1),
(vk.num_cons as f64).log2() as usize,
((vk.num_vars as f64).log2() as usize + 1),
);
// outer sum-check
@@ -333,37 +427,21 @@ impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G
.map(|i| (i + 1, U.X[i]))
.collect::<Vec<(usize, G::Scalar)>>(),
);
SparsePolynomial::new((vk.S.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..])
SparsePolynomial::new((vk.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..])
};
(G::Scalar::one() - r_y[0]) * self.eval_W + r_y[0] * eval_X
};
let evaluate_as_sparse_polynomial = |S: &R1CSShape<G>,
r_x: &[G::Scalar],
r_y: &[G::Scalar]|
-> (G::Scalar, G::Scalar, G::Scalar) {
let evaluate_with_table =
|M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar {
(0..M.len())
.collect::<Vec<usize>>()
.par_iter()
.map(|&i| {
let (row, col, val) = M[i];
T_x[row] * T_y[col] * val
})
.reduce(G::Scalar::zero, |acc, x| acc + x)
};
// verify evaluation argument to retrieve evaluations of R1CS matrices
let (eval_A, eval_B, eval_C) = CC::verify(
&vk.vk_ee,
&vk.comm,
&(&r_x, &r_y),
&self.eval_arg_cc,
&mut transcript,
)?;
let T_x = EqPolynomial::new(r_x.to_vec()).evals();
let T_y = EqPolynomial::new(r_y.to_vec()).evals();
let eval_A_r = evaluate_with_table(&S.A, &T_x, &T_y);
let eval_B_r = evaluate_with_table(&S.B, &T_x, &T_y);
let eval_C_r = evaluate_with_table(&S.C, &T_x, &T_y);
(eval_A_r, eval_B_r, eval_C_r)
};
let (eval_A_r, eval_B_r, eval_C_r) = evaluate_as_sparse_polynomial(&vk.S, &r_x, &r_y);
let claim_inner_final_expected = (eval_A_r + r * eval_B_r + r * r * eval_C_r) * eval_Z;
let claim_inner_final_expected = (eval_A + r * eval_B + r * r * eval_C) * eval_Z;
if claim_inner_final != claim_inner_final_expected {
return Err(NovaError::InvalidSumcheckProof);
}
@@ -2,6 +2,7 @@
use core::ops::Index;
use ff::PrimeField;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
pub(crate) struct EqPolynomial<Scalar: PrimeField> {
r: Vec<Scalar>,
@@ -45,8 +46,8 @@ impl<Scalar: PrimeField> EqPolynomial<Scalar> {
}
}
#[derive(Debug)]
pub(crate) struct MultilinearPolynomial<Scalar: PrimeField> {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultilinearPolynomial<Scalar: PrimeField> {
num_vars: usize, // the number of variables in the multilinear polynomial
Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}
@@ -97,6 +98,23 @@ impl<Scalar: PrimeField> MultilinearPolynomial<Scalar> {
.map(|i| chis[i] * self.Z[i])
.reduce(Scalar::zero, |x, y| x + y)
}
pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar {
EqPolynomial::new(r.to_vec())
.evals()
.into_par_iter()
.zip(Z.into_par_iter())
.map(|(a, b)| a * b)
.reduce(Scalar::zero, |x, y| x + y)
}
pub fn split(&self, idx: usize) -> (Self, Self) {
assert!(idx < self.len());
(
Self::new(self.Z[..idx].to_vec()),
Self::new(self.Z[idx..2 * idx].to_vec()),
)
}
}
impl<Scalar: PrimeField> Index<usize> for MultilinearPolynomial<Scalar> {
220 src/spartan/spark/mod.rs Normal file
@@ -0,0 +1,220 @@
//! This module implements `CompCommitmentEngineTrait` using Spartan's SPARK compiler
//! We also provide a trivial implementation that has the verifier evaluate the sparse polynomials
use crate::{
errors::NovaError,
r1cs::R1CSShape,
spartan::{math::Math, CompCommitmentEngineTrait},
traits::{evaluation::EvaluationEngineTrait, Group, TranscriptReprTrait},
CommitmentKey,
};
use core::marker::PhantomData;
use serde::{Deserialize, Serialize};
/// A trivial implementation of `ComputationCommitmentEngineTrait`
pub struct TrivialCompComputationEngine<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
_p: PhantomData<G>,
_p2: PhantomData<EE>,
}
/// Provides an implementation of a trivial commitment
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialCommitment<G: Group> {
S: R1CSShape<G>,
}
/// Provides an implementation of a trivial decommitment
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialDecommitment<G: Group> {
_p: PhantomData<G>,
}
/// Provides an implementation of a trivial evaluation argument
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialEvaluationArgument<G: Group> {
_p: PhantomData<G>,
}
impl<G: Group> TranscriptReprTrait<G> for TrivialCommitment<G> {
fn to_transcript_bytes(&self) -> Vec<u8> {
self.S.to_transcript_bytes()
}
}
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> CompCommitmentEngineTrait<G, EE>
for TrivialCompComputationEngine<G, EE>
{
type Decommitment = TrivialDecommitment<G>;
type Commitment = TrivialCommitment<G>;
type EvaluationArgument = TrivialEvaluationArgument<G>;
/// commits to R1CS matrices
fn commit(
_ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
) -> Result<(Self::Commitment, Self::Decommitment), NovaError> {
Ok((
TrivialCommitment { S: S.clone() },
TrivialDecommitment {
_p: Default::default(),
},
))
}
/// proves an evaluation of R1CS matrices viewed as polynomials
fn prove(
_ck: &CommitmentKey<G>,
_ek: &EE::ProverKey,
_S: &R1CSShape<G>,
_decomm: &Self::Decommitment,
_comm: &Self::Commitment,
_r: &(&[G::Scalar], &[G::Scalar]),
_transcript: &mut G::TE,
) -> Result<Self::EvaluationArgument, NovaError> {
Ok(TrivialEvaluationArgument {
_p: Default::default(),
})
}
/// verifies an evaluation of R1CS matrices viewed as polynomials
fn verify(
_vk: &EE::VerifierKey,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
_arg: &Self::EvaluationArgument,
_transcript: &mut G::TE,
) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError> {
let (r_x, r_y) = r;
let evals = SparsePolynomial::<G>::multi_evaluate(&[&comm.S.A, &comm.S.B, &comm.S.C], r_x, r_y);
Ok((evals[0], evals[1], evals[2]))
}
}
mod product;
mod sparse;
use sparse::{SparseEvaluationArgument, SparsePolynomial, SparsePolynomialCommitment};
/// A non-trivial implementation of `CompCommitmentEngineTrait` using Spartan's SPARK compiler
pub struct SparkEngine<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
_p: PhantomData<G>,
_p2: PhantomData<EE>,
}
/// An implementation of Spark decommitment
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkDecommitment<G: Group> {
A: SparsePolynomial<G>,
B: SparsePolynomial<G>,
C: SparsePolynomial<G>,
}
impl<G: Group> SparkDecommitment<G> {
fn new(S: &R1CSShape<G>) -> Self {
let ell = (S.num_cons.log_2(), S.num_vars.log_2() + 1);
let A = SparsePolynomial::new(ell, &S.A);
let B = SparsePolynomial::new(ell, &S.B);
let C = SparsePolynomial::new(ell, &S.C);
Self { A, B, C }
}
fn commit(&self, ck: &CommitmentKey<G>) -> SparkCommitment<G> {
let comm_A = self.A.commit(ck);
let comm_B = self.B.commit(ck);
let comm_C = self.C.commit(ck);
SparkCommitment {
comm_A,
comm_B,
comm_C,
}
}
}
/// An implementation of Spark commitment
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkCommitment<G: Group> {
comm_A: SparsePolynomialCommitment<G>,
comm_B: SparsePolynomialCommitment<G>,
comm_C: SparsePolynomialCommitment<G>,
}
impl<G: Group> TranscriptReprTrait<G> for SparkCommitment<G> {
fn to_transcript_bytes(&self) -> Vec<u8> {
let mut bytes = self.comm_A.to_transcript_bytes();
bytes.extend(self.comm_B.to_transcript_bytes());
bytes.extend(self.comm_C.to_transcript_bytes());
bytes
}
}
/// Provides an implementation of a trivial evaluation argument
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkEvaluationArgument<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
arg_A: SparseEvaluationArgument<G, EE>,
arg_B: SparseEvaluationArgument<G, EE>,
arg_C: SparseEvaluationArgument<G, EE>,
}
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> CompCommitmentEngineTrait<G, EE>
for SparkEngine<G, EE>
{
type Decommitment = SparkDecommitment<G>;
type Commitment = SparkCommitment<G>;
type EvaluationArgument = SparkEvaluationArgument<G, EE>;
/// commits to R1CS matrices
fn commit(
ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
) -> Result<(Self::Commitment, Self::Decommitment), NovaError> {
let sparse = SparkDecommitment::new(S);
let comm = sparse.commit(ck);
Ok((comm, sparse))
}
/// proves an evaluation of R1CS matrices viewed as polynomials
fn prove(
ck: &CommitmentKey<G>,
pk_ee: &EE::ProverKey,
S: &R1CSShape<G>,
decomm: &Self::Decommitment,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
transcript: &mut G::TE,
) -> Result<Self::EvaluationArgument, NovaError> {
let arg_A =
SparseEvaluationArgument::prove(ck, pk_ee, &decomm.A, &S.A, &comm.comm_A, r, transcript)?;
let arg_B =
SparseEvaluationArgument::prove(ck, pk_ee, &decomm.B, &S.B, &comm.comm_B, r, transcript)?;
let arg_C =
SparseEvaluationArgument::prove(ck, pk_ee, &decomm.C, &S.C, &comm.comm_C, r, transcript)?;
Ok(SparkEvaluationArgument {
arg_A,
arg_B,
arg_C,
})
}
/// verifies an evaluation of R1CS matrices viewed as polynomials
fn verify(
vk_ee: &EE::VerifierKey,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
arg: &Self::EvaluationArgument,
transcript: &mut G::TE,
) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError> {
let eval_A = arg.arg_A.verify(vk_ee, &comm.comm_A, r, transcript)?;
let eval_B = arg.arg_B.verify(vk_ee, &comm.comm_B, r, transcript)?;
let eval_C = arg.arg_C.verify(vk_ee, &comm.comm_C, r, transcript)?;
Ok((eval_A, eval_B, eval_C))
}
}
477 src/spartan/spark/product.rs Normal file
@@ -0,0 +1,477 @@
use crate::{
errors::NovaError,
spartan::{
math::Math,
polynomial::{EqPolynomial, MultilinearPolynomial},
sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly},
},
traits::{Group, TranscriptEngineTrait},
};
use core::marker::PhantomData;
use ff::{Field, PrimeField};
use serde::{Deserialize, Serialize};
pub(crate) struct IdentityPolynomial<Scalar: PrimeField> {
ell: usize,
_p: PhantomData<Scalar>,
}
impl<Scalar: PrimeField> IdentityPolynomial<Scalar> {
pub fn new(ell: usize) -> Self {
IdentityPolynomial {
ell,
_p: Default::default(),
}
}
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.ell, r.len());
(0..self.ell)
.map(|i| Scalar::from(2_usize.pow((self.ell - i - 1) as u32) as u64) * r[i])
.fold(Scalar::zero(), |acc, item| acc + item)
}
}
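As a sanity check of `evaluate` above: on a Boolean point, the identity polynomial returns the integer whose big-endian bits are the point's coordinates. A tiny standalone analogue over `u64` (illustration only; the real code works over a prime field):

```rust
// Toy version of IdentityPolynomial::evaluate over u64 instead of a prime field.
fn identity_eval(r: &[u64]) -> u64 {
    let ell = r.len();
    (0..ell).map(|i| (1u64 << (ell - i - 1)) * r[i]).sum()
}

fn main() {
    // the point (1, 0, 1) is the big-endian encoding of 5
    assert_eq!(identity_eval(&[1, 0, 1]), 5);
    // on non-Boolean points it is simply the same weighted sum
    assert_eq!(identity_eval(&[2, 3, 1]), 2 * 4 + 3 * 2 + 1);
}
```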
impl<G: Group> SumcheckProof<G> {
pub fn prove_cubic<F>(
claim: &G::Scalar,
num_rounds: usize,
poly_A: &mut MultilinearPolynomial<G::Scalar>,
poly_B: &mut MultilinearPolynomial<G::Scalar>,
poly_C: &mut MultilinearPolynomial<G::Scalar>,
comb_func: F,
transcript: &mut G::TE,
) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError>
where
F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar,
{
let mut e = *claim;
let mut r: Vec<G::Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly<G>> = Vec::new();
for _j in 0..num_rounds {
let mut eval_point_0 = G::Scalar::zero();
let mut eval_point_2 = G::Scalar::zero();
let mut eval_point_3 = G::Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
transcript.absorb(b"p", &poly);
//derive the verifier's challenge for the next round
let r_i = transcript.squeeze(b"c")?;
r.push(r_i);
// bound all tables to the verifier's challenege
poly_A.bound_poly_var_top(&r_i);
poly_B.bound_poly_var_top(&r_i);
poly_C.bound_poly_var_top(&r_i);
e = poly.evaluate(&r_i);
cubic_polys.push(poly.compress());
}
Ok((
Self::new(cubic_polys),
r,
vec![poly_A[0], poly_B[0], poly_C[0]],
))
}
pub fn prove_cubic_batched<F>(
claim: &G::Scalar,
num_rounds: usize,
poly_vec: (
&mut Vec<&mut MultilinearPolynomial<G::Scalar>>,
&mut Vec<&mut MultilinearPolynomial<G::Scalar>>,
&mut MultilinearPolynomial<G::Scalar>,
),
coeffs: &[G::Scalar],
comb_func: F,
transcript: &mut G::TE,
) -> Result<
(
Self,
Vec<G::Scalar>,
(Vec<G::Scalar>, Vec<G::Scalar>, G::Scalar),
),
NovaError,
>
where
F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar,
{
let (poly_A_vec, poly_B_vec, poly_C) = poly_vec;
let mut e = *claim;
let mut r: Vec<G::Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly<G>> = Vec::new();
for _j in 0..num_rounds {
let mut evals: Vec<(G::Scalar, G::Scalar, G::Scalar)> = Vec::new();
for (poly_A, poly_B) in poly_A_vec.iter().zip(poly_B_vec.iter()) {
let mut eval_point_0 = G::Scalar::zero();
let mut eval_point_2 = G::Scalar::zero();
let mut eval_point_3 = G::Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
let evals_combined_0 = (0..evals.len())
.map(|i| evals[i].0 * coeffs[i])
.fold(G::Scalar::zero(), |acc, item| acc + item);
let evals_combined_2 = (0..evals.len())
.map(|i| evals[i].1 * coeffs[i])
.fold(G::Scalar::zero(), |acc, item| acc + item);
let evals_combined_3 = (0..evals.len())
.map(|i| evals[i].2 * coeffs[i])
.fold(G::Scalar::zero(), |acc, item| acc + item);
let evals = vec![
evals_combined_0,
e - evals_combined_0,
evals_combined_2,
evals_combined_3,
];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
transcript.absorb(b"p", &poly);
// derive the verifier's challenge for the next round
let r_i = transcript.squeeze(b"c")?;
r.push(r_i);
// bound all tables to the verifier's challenege
for (poly_A, poly_B) in poly_A_vec.iter_mut().zip(poly_B_vec.iter_mut()) {
poly_A.bound_poly_var_top(&r_i);
poly_B.bound_poly_var_top(&r_i);
}
poly_C.bound_poly_var_top(&r_i);
e = poly.evaluate(&r_i);
cubic_polys.push(poly.compress());
}
let poly_A_final = (0..poly_A_vec.len()).map(|i| poly_A_vec[i][0]).collect();
let poly_B_final = (0..poly_B_vec.len()).map(|i| poly_B_vec[i][0]).collect();
let claims_prod = (poly_A_final, poly_B_final, poly_C[0]);
Ok((SumcheckProof::new(cubic_polys), r, claims_prod))
}
}
#[derive(Debug)]
pub struct ProductArgumentInputs<G: Group> {
left_vec: Vec<MultilinearPolynomial<G::Scalar>>,
right_vec: Vec<MultilinearPolynomial<G::Scalar>>,
}
impl<G: Group> ProductArgumentInputs<G> {
fn compute_layer(
inp_left: &MultilinearPolynomial<G::Scalar>,
inp_right: &MultilinearPolynomial<G::Scalar>,
) -> (
MultilinearPolynomial<G::Scalar>,
MultilinearPolynomial<G::Scalar>,
) {
let len = inp_left.len() + inp_right.len();
let outp_left = (0..len / 4)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<G::Scalar>>();
let outp_right = (len / 4..len / 2)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<G::Scalar>>();
(
MultilinearPolynomial::new(outp_left),
MultilinearPolynomial::new(outp_right),
)
}
pub fn new(poly: &MultilinearPolynomial<G::Scalar>) -> Self {
let mut left_vec: Vec<MultilinearPolynomial<G::Scalar>> = Vec::new();
let mut right_vec: Vec<MultilinearPolynomial<G::Scalar>> = Vec::new();
let num_layers = poly.len().log_2();
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
right_vec.push(outp_right);
for i in 0..num_layers - 1 {
let (outp_left, outp_right) =
ProductArgumentInputs::<G>::compute_layer(&left_vec[i], &right_vec[i]);
left_vec.push(outp_left);
right_vec.push(outp_right);
}
Self {
left_vec,
right_vec,
}
}
pub fn evaluate(&self) -> G::Scalar {
let len = self.left_vec.len();
assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
}
}
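The layers built above form a standard binary product tree: each layer multiplies the left and right halves of the previous one pointwise, and the final pair multiplies to the grand product that `evaluate` returns. A standalone `u64` sketch of the same layering (illustration only, not the field-based implementation):

```rust
// Toy product tree mirroring ProductArgumentInputs::new / evaluate, over u64.
fn product_tree(values: &[u64]) -> u64 {
    assert!(values.len().is_power_of_two() && values.len() >= 2);
    let mut left = values[..values.len() / 2].to_vec();
    let mut right = values[values.len() / 2..].to_vec();
    while left.len() > 1 {
        // next layer: pointwise products of the current layer, split into halves
        let layer: Vec<u64> = left.iter().zip(right.iter()).map(|(l, r)| l * r).collect();
        let mid = layer.len() / 2;
        right = layer[mid..].to_vec();
        left = layer[..mid].to_vec();
    }
    left[0] * right[0]
}

fn main() {
    assert_eq!(product_tree(&[1, 2, 3, 4, 5, 6, 7, 8]), 40_320); // 8!
}
```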
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct LayerProofBatched<G: Group> {
proof: SumcheckProof<G>,
claims_prod_left: Vec<G::Scalar>,
claims_prod_right: Vec<G::Scalar>,
}
impl<G: Group> LayerProofBatched<G> {
pub fn verify(
&self,
claim: G::Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut G::TE,
) -> Result<(G::Scalar, Vec<G::Scalar>), NovaError> {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub(crate) struct ProductArgumentBatched<G: Group> {
proof: Vec<LayerProofBatched<G>>,
}
impl<G: Group> ProductArgumentBatched<G> {
pub fn prove(
poly_vec: &[&MultilinearPolynomial<G::Scalar>],
transcript: &mut G::TE,
) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError> {
let mut prod_circuit_vec: Vec<_> = (0..poly_vec.len())
.map(|i| ProductArgumentInputs::<G>::new(poly_vec[i]))
.collect();
let mut proof_layers: Vec<LayerProofBatched<G>> = Vec::new();
let num_layers = prod_circuit_vec[0].left_vec.len();
let evals = (0..prod_circuit_vec.len())
.map(|i| prod_circuit_vec[i].evaluate())
.collect::<Vec<G::Scalar>>();
let mut claims_to_verify = evals.clone();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
let len = prod_circuit_vec[0].left_vec[layer_id].len()
+ prod_circuit_vec[0].right_vec[layer_id].len();
let mut poly_C = MultilinearPolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log_2();
let comb_func_prod = |poly_A_comp: &G::Scalar,
poly_B_comp: &G::Scalar,
poly_C_comp: &G::Scalar|
-> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp };
let mut poly_A_batched: Vec<&mut MultilinearPolynomial<G::Scalar>> = Vec::new();
let mut poly_B_batched: Vec<&mut MultilinearPolynomial<G::Scalar>> = Vec::new();
for prod_circuit in prod_circuit_vec.iter_mut() {
poly_A_batched.push(&mut prod_circuit.left_vec[layer_id]);
poly_B_batched.push(&mut prod_circuit.right_vec[layer_id])
}
let poly_vec = (&mut poly_A_batched, &mut poly_B_batched, &mut poly_C);
// produce a fresh set of coeffs and a joint claim
let coeff_vec = {
let s = transcript.squeeze(b"r")?;
let mut s_vec = vec![s];
for i in 1..claims_to_verify.len() {
s_vec.push(s_vec[i - 1] * s);
}
s_vec
};
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.fold(G::Scalar::zero(), |acc, item| acc + item);
let (proof, rand_prod, claims_prod) = SumcheckProof::prove_cubic_batched(
&claim,
num_rounds_prod,
poly_vec,
&coeff_vec,
comb_func_prod,
transcript,
)?;
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
let v = {
let mut v = claims_prod_left.clone();
v.extend(&claims_prod_right);
v
};
transcript.absorb(b"p", &v.as_slice());
// produce a random challenge to condense two claims into a single claim
let r_layer = transcript.squeeze(b"c")?;
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<G::Scalar>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof_layers.push(LayerProofBatched {
proof,
claims_prod_left,
claims_prod_right,
});
}
Ok((
ProductArgumentBatched {
proof: proof_layers,
},
evals,
rand,
))
}
pub fn verify(
&self,
claims_prod_vec: &[G::Scalar],
len: usize,
transcript: &mut G::TE,
) -> Result<(Vec<G::Scalar>, Vec<G::Scalar>), NovaError> {
let num_layers = len.log_2();
let mut rand: Vec<G::Scalar> = Vec::new();
if self.proof.len() != num_layers {
return Err(NovaError::InvalidProductProof);
}
let mut claims_to_verify = claims_prod_vec.to_owned();
for (num_rounds, i) in (0..num_layers).enumerate() {
// produce random coefficients, one for each instance
let coeff_vec = {
let s = transcript.squeeze(b"r")?;
let mut s_vec = vec![s];
for i in 1..claims_to_verify.len() {
s_vec.push(s_vec[i - 1] * s);
}
s_vec
};
// produce a joint claim
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.fold(G::Scalar::zero(), |acc, item| acc + item);
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript)?;
let claims_prod_left = &self.proof[i].claims_prod_left;
let claims_prod_right = &self.proof[i].claims_prod_right;
if claims_prod_left.len() != claims_prod_vec.len()
|| claims_prod_right.len() != claims_prod_vec.len()
{
return Err(NovaError::InvalidProductProof);
}
let v = {
let mut v = claims_prod_left.clone();
v.extend(claims_prod_right);
v
};
transcript.absorb(b"p", &v.as_slice());
if rand.len() != rand_prod.len() {
return Err(NovaError::InvalidProductProof);
}
let eq: G::Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i] + (G::Scalar::one() - rand[i]) * (G::Scalar::one() - rand_prod[i])
})
.fold(G::Scalar::one(), |acc, item| acc * item);
let claim_expected: G::Scalar = (0..claims_prod_vec.len())
.map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
.fold(G::Scalar::zero(), |acc, item| acc + item);
if claim_expected != claim_last {
return Err(NovaError::InvalidProductProof);
}
// produce a random challenge
let r_layer = transcript.squeeze(b"c")?;
claims_to_verify = (0..claims_prod_left.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<G::Scalar>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
}
Ok((claims_to_verify, rand))
}
}
732 src/spartan/spark/sparse.rs Normal file
@@ -0,0 +1,732 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
#![allow(clippy::needless_range_loop)]
|
||||
use crate::{
|
||||
errors::NovaError,
|
||||
spartan::{
|
||||
math::Math,
|
||||
polynomial::{EqPolynomial, MultilinearPolynomial},
|
||||
spark::product::{IdentityPolynomial, ProductArgumentBatched},
|
||||
SumcheckProof,
|
||||
},
|
||||
traits::{
|
||||
commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Group,
|
||||
TranscriptEngineTrait, TranscriptReprTrait,
|
||||
},
|
||||
Commitment, CommitmentKey,
|
||||
};
|
||||
use ff::Field;
|
||||
use rayon::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// A type that holds a sparse polynomial in dense representation
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
#[serde(bound = "")]
|
||||
pub struct SparsePolynomial<G: Group> {
|
||||
ell: (usize, usize), // number of variables in each dimension
|
||||
|
||||
// dense representation
|
||||
row: Vec<G::Scalar>,
|
||||
col: Vec<G::Scalar>,
|
||||
val: Vec<G::Scalar>,
|
||||
|
||||
// timestamp polynomials
|
||||
row_read_ts: Vec<G::Scalar>,
|
||||
row_audit_ts: Vec<G::Scalar>,
|
||||
col_read_ts: Vec<G::Scalar>,
|
||||
col_audit_ts: Vec<G::Scalar>,
|
||||
}
|
||||
|
||||
/// A type that holds a commitment to a sparse polynomial
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
#[serde(bound = "")]
|
||||
pub struct SparsePolynomialCommitment<G: Group> {
|
||||
ell: (usize, usize), // number of variables
|
||||
size: usize, // size of the dense representation
|
||||
|
||||
// commitments to the dense representation
|
||||
comm_row: Commitment<G>,
|
||||
comm_col: Commitment<G>,
|
||||
comm_val: Commitment<G>,
|
||||
|
||||
// commitments to the timestamp polynomials
|
||||
comm_row_read_ts: Commitment<G>,
|
||||
comm_row_audit_ts: Commitment<G>,
|
||||
comm_col_read_ts: Commitment<G>,
|
||||
comm_col_audit_ts: Commitment<G>,
|
||||
}
|
||||
|
||||
impl<G: Group> TranscriptReprTrait<G> for SparsePolynomialCommitment<G> {
|
||||
fn to_transcript_bytes(&self) -> Vec<u8> {
|
||||
[
|
||||
self.comm_row,
|
||||
self.comm_col,
|
||||
self.comm_val,
|
||||
self.comm_row_read_ts,
|
||||
self.comm_row_audit_ts,
|
||||
self.comm_col_read_ts,
|
||||
self.comm_col_audit_ts,
|
||||
]
|
||||
.as_slice()
|
||||
.to_transcript_bytes()
|
||||
}
|
||||
}
|
||||
|
||||

impl<G: Group> SparsePolynomial<G> {
  pub fn new(ell: (usize, usize), M: &[(usize, usize, G::Scalar)]) -> Self {
    let mut row = M.iter().map(|(r, _, _)| *r).collect::<Vec<usize>>();
    let mut col = M.iter().map(|(_, c, _)| *c).collect::<Vec<usize>>();
    let mut val = M.iter().map(|(_, _, v)| *v).collect::<Vec<G::Scalar>>();

    let num_ops = M.len().next_power_of_two();
    let num_cells_row = ell.0.pow2();
    let num_cells_col = ell.1.pow2();
    row.resize(num_ops, 0usize);
    col.resize(num_ops, 0usize);
    val.resize(num_ops, G::Scalar::zero());

    // timestamp calculation routine
    let timestamp_calc =
      |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> (Vec<usize>, Vec<usize>) {
        let mut read_ts = vec![0usize; num_ops];
        let mut audit_ts = vec![0usize; num_cells];

        assert!(num_ops >= addr_trace.len());
        for i in 0..addr_trace.len() {
          let addr = addr_trace[i];
          assert!(addr < num_cells);
          let r_ts = audit_ts[addr];
          read_ts[i] = r_ts;

          let w_ts = r_ts + 1;
          audit_ts[addr] = w_ts;
        }
        (read_ts, audit_ts)
      };

    // timestamp polynomials for row and col
    let (row_read_ts, row_audit_ts) = timestamp_calc(num_ops, num_cells_row, &row);
    let (col_read_ts, col_audit_ts) = timestamp_calc(num_ops, num_cells_col, &col);

    let to_vec_scalar = |v: &[usize]| -> Vec<G::Scalar> {
      (0..v.len())
        .map(|i| G::Scalar::from(v[i] as u64))
        .collect::<Vec<G::Scalar>>()
    };

    Self {
      ell,
      // dense representation
      row: to_vec_scalar(&row),
      col: to_vec_scalar(&col),
      val,

      // timestamp polynomials
      row_read_ts: to_vec_scalar(&row_read_ts),
      row_audit_ts: to_vec_scalar(&row_audit_ts),
      col_read_ts: to_vec_scalar(&col_read_ts),
      col_audit_ts: to_vec_scalar(&col_audit_ts),
    }
  }
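
As an aside, here is a minimal standalone sketch of the timestamp bookkeeping used above (plain Rust, independent of Nova's types; the trace and the expected outputs are illustrative only). Each read of an address observes the cell's current counter and the write-back increments it, so at the end audit_ts records how many times each cell was read:

// Standalone sketch of the offline-memory-checking timestamps (illustrative only).
fn timestamp_calc(num_ops: usize, num_cells: usize, addr_trace: &[usize]) -> (Vec<usize>, Vec<usize>) {
  let mut read_ts = vec![0usize; num_ops];
  let mut audit_ts = vec![0usize; num_cells];
  for (i, &addr) in addr_trace.iter().enumerate() {
    assert!(addr < num_cells);
    read_ts[i] = audit_ts[addr]; // timestamp observed by the i-th read
    audit_ts[addr] += 1; // the write-back bumps the cell's counter
  }
  (read_ts, audit_ts)
}

fn main() {
  // four lookups into a memory with four cells
  let (read_ts, audit_ts) = timestamp_calc(4, 4, &[2, 0, 2, 2]);
  assert_eq!(read_ts, vec![0, 0, 1, 2]); // later reads of cell 2 see the earlier write-backs
  assert_eq!(audit_ts, vec![1, 0, 3, 0]); // final per-cell counters
  println!("read_ts = {:?}, audit_ts = {:?}", read_ts, audit_ts);
}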

  pub fn commit(&self, ck: &CommitmentKey<G>) -> SparsePolynomialCommitment<G> {
    let comm_vec: Vec<Commitment<G>> = [
      &self.row,
      &self.col,
      &self.val,
      &self.row_read_ts,
      &self.row_audit_ts,
      &self.col_read_ts,
      &self.col_audit_ts,
    ]
    .par_iter()
    .map(|v| G::CE::commit(ck, v))
    .collect();

    SparsePolynomialCommitment {
      ell: self.ell,
      size: self.row.len(),
      comm_row: comm_vec[0],
      comm_col: comm_vec[1],
      comm_val: comm_vec[2],
      comm_row_read_ts: comm_vec[3],
      comm_row_audit_ts: comm_vec[4],
      comm_col_read_ts: comm_vec[5],
      comm_col_audit_ts: comm_vec[6],
    }
  }

  pub fn multi_evaluate(
    M_vec: &[&[(usize, usize, G::Scalar)]],
    r_x: &[G::Scalar],
    r_y: &[G::Scalar],
  ) -> Vec<G::Scalar> {
    let evaluate_with_table =
      |M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar {
        (0..M.len())
          .collect::<Vec<usize>>()
          .par_iter()
          .map(|&i| {
            let (row, col, val) = M[i];
            T_x[row] * T_y[col] * val
          })
          .reduce(G::Scalar::zero, |acc, x| acc + x)
      };

    let (T_x, T_y) = rayon::join(
      || EqPolynomial::new(r_x.to_vec()).evals(),
      || EqPolynomial::new(r_y.to_vec()).evals(),
    );

    (0..M_vec.len())
      .collect::<Vec<usize>>()
      .par_iter()
      .map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y))
      .collect()
  }
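
In other words, multi_evaluate computes the multilinear extension of each sparse matrix at the point (r_x, r_y); as a sketch, with bits(.) denoting the bit decomposition of an index,

\[ \widetilde{M}(r_x, r_y) \;=\; \sum_{(i,\,j,\,v) \in M} \widetilde{eq}(r_x, \mathrm{bits}(i)) \cdot \widetilde{eq}(r_y, \mathrm{bits}(j)) \cdot v, \]

which is exactly the sum of T_x[row] * T_y[col] * val over the nonzero entries, since T_x and T_y tabulate the two equality polynomials.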

  fn evaluation_oracles(
    M: &[(usize, usize, G::Scalar)],
    r_x: &[G::Scalar],
    r_y: &[G::Scalar],
  ) -> (
    Vec<G::Scalar>,
    Vec<G::Scalar>,
    Vec<G::Scalar>,
    Vec<G::Scalar>,
  ) {
    let evaluation_oracles_with_table = |M: &[(usize, usize, G::Scalar)],
                                         T_x: &[G::Scalar],
                                         T_y: &[G::Scalar]|
     -> (Vec<G::Scalar>, Vec<G::Scalar>) {
      (0..M.len())
        .collect::<Vec<usize>>()
        .par_iter()
        .map(|&i| {
          let (row, col, _val) = M[i];
          (T_x[row], T_y[col])
        })
        .collect::<Vec<(G::Scalar, G::Scalar)>>()
        .into_par_iter()
        .unzip()
    };

    let (T_x, T_y) = rayon::join(
      || EqPolynomial::new(r_x.to_vec()).evals(),
      || EqPolynomial::new(r_y.to_vec()).evals(),
    );

    let (mut E_row, mut E_col) = evaluation_oracles_with_table(M, &T_x, &T_y);

    // resize the returned vectors
    E_row.resize(M.len().next_power_of_two(), T_x[0]); // we place T_x[0] since the resized row vector is padded with address 0
    E_col.resize(M.len().next_power_of_two(), T_y[0]);
    (E_row, E_col, T_x, T_y)
  }
}
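
The two oracles built here are simply the eq-tables indexed by the matrix's row and column addresses; as a sketch, for the k-th (padded) entry,

\[ E_{\mathrm{row}}[k] = \widetilde{eq}(r_x, \mathrm{bits}(\mathrm{row}_k)), \qquad E_{\mathrm{col}}[k] = \widetilde{eq}(r_y, \mathrm{bits}(\mathrm{col}_k)), \]

so the claimed evaluation equals \(\sum_k E_{\mathrm{row}}[k] \cdot E_{\mathrm{col}}[k] \cdot \mathrm{val}[k]\), which is precisely the cubic sum-check instance established in prove below.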

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparseEvaluationArgument<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
  // claimed evaluation
  eval: G::Scalar,

  // oracles
  comm_E_row: Commitment<G>,
  comm_E_col: Commitment<G>,

  // proof of correct evaluation wrt oracles
  sc_proof_eval: SumcheckProof<G>,
  eval_E_row: G::Scalar,
  eval_E_col: G::Scalar,
  eval_val: G::Scalar,
  arg_eval: EE::EvaluationArgument,

  // proof that E_row and E_col are well-formed
  eval_init_row: G::Scalar,
  eval_read_row: G::Scalar,
  eval_write_row: G::Scalar,
  eval_audit_row: G::Scalar,
  eval_init_col: G::Scalar,
  eval_read_col: G::Scalar,
  eval_write_col: G::Scalar,
  eval_audit_col: G::Scalar,
  sc_prod_init_audit_row: ProductArgumentBatched<G>,
  sc_prod_read_write_row_col: ProductArgumentBatched<G>,
  sc_prod_init_audit_col: ProductArgumentBatched<G>,
  eval_row: G::Scalar,
  eval_row_read_ts: G::Scalar,
  eval_E_row2: G::Scalar,
  eval_row_audit_ts: G::Scalar,
  eval_col: G::Scalar,
  eval_col_read_ts: G::Scalar,
  eval_E_col2: G::Scalar,
  eval_col_audit_ts: G::Scalar,
  arg_row_col_joint: EE::EvaluationArgument,
  arg_row_audit_ts: EE::EvaluationArgument,
  arg_col_audit_ts: EE::EvaluationArgument,
}

impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> SparseEvaluationArgument<G, EE> {
  pub fn prove(
    ck: &CommitmentKey<G>,
    pk_ee: &EE::ProverKey,
    poly: &SparsePolynomial<G>,
    sparse: &[(usize, usize, G::Scalar)],
    comm: &SparsePolynomialCommitment<G>,
    r: &(&[G::Scalar], &[G::Scalar]),
    transcript: &mut G::TE,
  ) -> Result<Self, NovaError> {
    let (r_x, r_y) = r;
    let eval = SparsePolynomial::<G>::multi_evaluate(&[sparse], r_x, r_y)[0];

    // compute oracles to prove the correctness of `eval`
    let (E_row, E_col, T_x, T_y) = SparsePolynomial::<G>::evaluation_oracles(sparse, r_x, r_y);
    let val = poly.val.clone();

    // commit to the two oracles
    let comm_E_row = G::CE::commit(ck, &E_row);
    let comm_E_col = G::CE::commit(ck, &E_col);

    // absorb the commitments and the claimed evaluation
    transcript.absorb(b"E", &vec![comm_E_row, comm_E_col].as_slice());
    transcript.absorb(b"e", &eval);

    let comb_func_eval = |poly_A_comp: &G::Scalar,
                          poly_B_comp: &G::Scalar,
                          poly_C_comp: &G::Scalar|
     -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp };
    let (sc_proof_eval, r_eval, claims_eval) = SumcheckProof::<G>::prove_cubic(
      &eval,
      E_row.len().log_2(), // number of rounds
      &mut MultilinearPolynomial::new(E_row.clone()),
      &mut MultilinearPolynomial::new(E_col.clone()),
      &mut MultilinearPolynomial::new(val.clone()),
      comb_func_eval,
      transcript,
    )?;

    // prove evaluations of E_row, E_col and val at r_eval
    let rho = transcript.squeeze(b"r")?;
    let comm_joint = comm_E_row + comm_E_col * rho + comm.comm_val * rho * rho;
    let eval_joint = claims_eval[0] + rho * claims_eval[1] + rho * rho * claims_eval[2];
    let poly_eval = E_row
      .iter()
      .zip(E_col.iter())
      .zip(val.iter())
      .map(|((a, b), c)| *a + rho * *b + rho * rho * *c)
      .collect::<Vec<G::Scalar>>();
    let arg_eval = EE::prove(
      ck,
      pk_ee,
      transcript,
      &comm_joint,
      &poly_eval,
      &r_eval,
      &eval_joint,
    )?;

    // we now need to prove that E_row and E_col are well-formed
    // we use memory checking: H(INIT) * H(WS) =? H(RS) * H(FINAL)
    let gamma_1 = transcript.squeeze(b"g1")?;
    let gamma_2 = transcript.squeeze(b"g2")?;

    let gamma_1_sqr = gamma_1 * gamma_1;
    let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar {
      (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2
    };

    let init_row = (0..T_x.len())
      .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &G::Scalar::zero()))
      .collect::<Vec<G::Scalar>>();
    let read_row = (0..E_row.len())
      .map(|i| hash_func(&poly.row[i], &E_row[i], &poly.row_read_ts[i]))
      .collect::<Vec<G::Scalar>>();
    let write_row = (0..E_row.len())
      .map(|i| {
        hash_func(
          &poly.row[i],
          &E_row[i],
          &(poly.row_read_ts[i] + G::Scalar::one()),
        )
      })
      .collect::<Vec<G::Scalar>>();
    let audit_row = (0..T_x.len())
      .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &poly.row_audit_ts[i]))
      .collect::<Vec<G::Scalar>>();
    let init_col = (0..T_y.len())
      .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &G::Scalar::zero()))
      .collect::<Vec<G::Scalar>>();
    let read_col = (0..E_col.len())
      .map(|i| hash_func(&poly.col[i], &E_col[i], &poly.col_read_ts[i]))
      .collect::<Vec<G::Scalar>>();
    let write_col = (0..E_col.len())
      .map(|i| {
        hash_func(
          &poly.col[i],
          &E_col[i],
          &(poly.col_read_ts[i] + G::Scalar::one()),
        )
      })
      .collect::<Vec<G::Scalar>>();
    let audit_col = (0..T_y.len())
      .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &poly.col_audit_ts[i]))
      .collect::<Vec<G::Scalar>>();

    let (sc_prod_init_audit_row, eval_init_audit_row, r_init_audit_row) =
      ProductArgumentBatched::prove(
        &[
          &MultilinearPolynomial::new(init_row),
          &MultilinearPolynomial::new(audit_row),
        ],
        transcript,
      )?;

    assert_eq!(init_col.len(), audit_col.len());
    let (sc_prod_init_audit_col, eval_init_audit_col, r_init_audit_col) =
      ProductArgumentBatched::prove(
        &[
          &MultilinearPolynomial::new(init_col),
          &MultilinearPolynomial::new(audit_col),
        ],
        transcript,
      )?;

    assert_eq!(read_row.len(), write_row.len());
    assert_eq!(read_row.len(), read_col.len());
    assert_eq!(read_row.len(), write_col.len());

    let (sc_prod_read_write_row_col, eval_read_write_row_col, r_read_write_row_col) =
      ProductArgumentBatched::prove(
        &[
          &MultilinearPolynomial::new(read_row),
          &MultilinearPolynomial::new(write_row),
          &MultilinearPolynomial::new(read_col),
          &MultilinearPolynomial::new(write_col),
        ],
        transcript,
      )?;

    // row-related claims of polynomial evaluations to aid the final check of the sum-check
    let eval_row = MultilinearPolynomial::evaluate_with(&poly.row, &r_read_write_row_col);
    let eval_row_read_ts =
      MultilinearPolynomial::evaluate_with(&poly.row_read_ts, &r_read_write_row_col);
    let eval_E_row2 = MultilinearPolynomial::evaluate_with(&E_row, &r_read_write_row_col);
    let eval_row_audit_ts =
      MultilinearPolynomial::evaluate_with(&poly.row_audit_ts, &r_init_audit_row);

    // col-related claims of polynomial evaluations to aid the final check of the sum-check
    let eval_col = MultilinearPolynomial::evaluate_with(&poly.col, &r_read_write_row_col);
    let eval_col_read_ts =
      MultilinearPolynomial::evaluate_with(&poly.col_read_ts, &r_read_write_row_col);
    let eval_E_col2 = MultilinearPolynomial::evaluate_with(&E_col, &r_read_write_row_col);
    let eval_col_audit_ts =
      MultilinearPolynomial::evaluate_with(&poly.col_audit_ts, &r_init_audit_col);

    // we can batch-prove these six claims with a single evaluation argument
    transcript.absorb(
      b"e",
      &[
        eval_row,
        eval_row_read_ts,
        eval_E_row2,
        eval_col,
        eval_col_read_ts,
        eval_E_col2,
      ]
      .as_slice(),
    );
    let c = transcript.squeeze(b"c")?;
    let eval_joint = eval_row
      + c * eval_row_read_ts
      + c * c * eval_E_row2
      + c * c * c * eval_col
      + c * c * c * c * eval_col_read_ts
      + c * c * c * c * c * eval_E_col2;
    let comm_joint = comm.comm_row
      + comm.comm_row_read_ts * c
      + comm_E_row * c * c
      + comm.comm_col * c * c * c
      + comm.comm_col_read_ts * c * c * c * c
      + comm_E_col * c * c * c * c * c;
    let poly_joint = poly
      .row
      .iter()
      .zip(poly.row_read_ts.iter())
      .zip(E_row.into_iter())
      .zip(poly.col.iter())
      .zip(poly.col_read_ts.iter())
      .zip(E_col.into_iter())
      .map(|(((((x, y), z), m), n), q)| {
        *x + c * y + c * c * z + c * c * c * m + c * c * c * c * n + c * c * c * c * c * q
      })
      .collect::<Vec<_>>();

    let arg_row_col_joint = EE::prove(
      ck,
      pk_ee,
      transcript,
      &comm_joint,
      &poly_joint,
      &r_read_write_row_col,
      &eval_joint,
    )?;

    let arg_row_audit_ts = EE::prove(
      ck,
      pk_ee,
      transcript,
      &comm.comm_row_audit_ts,
      &poly.row_audit_ts,
      &r_init_audit_row,
      &eval_row_audit_ts,
    )?;

    let arg_col_audit_ts = EE::prove(
      ck,
      pk_ee,
      transcript,
      &comm.comm_col_audit_ts,
      &poly.col_audit_ts,
      &r_init_audit_col,
      &eval_col_audit_ts,
    )?;

    Ok(Self {
      // claimed evaluation
      eval,

      // oracles
      comm_E_row,
      comm_E_col,

      // proof of correct evaluation wrt oracles
      sc_proof_eval,
      eval_E_row: claims_eval[0],
      eval_E_col: claims_eval[1],
      eval_val: claims_eval[2],
      arg_eval,

      // proof that E_row and E_col are well-formed
      eval_init_row: eval_init_audit_row[0],
      eval_read_row: eval_read_write_row_col[0],
      eval_write_row: eval_read_write_row_col[1],
      eval_audit_row: eval_init_audit_row[1],
      eval_init_col: eval_init_audit_col[0],
      eval_read_col: eval_read_write_row_col[2],
      eval_write_col: eval_read_write_row_col[3],
      eval_audit_col: eval_init_audit_col[1],
      sc_prod_init_audit_row,
      sc_prod_read_write_row_col,
      sc_prod_init_audit_col,
      eval_row,
      eval_row_read_ts,
      eval_E_row2,
      eval_row_audit_ts,
      eval_col,
      eval_col_read_ts,
      eval_E_col2,
      eval_col_audit_ts,
      arg_row_col_joint,
      arg_row_audit_ts,
      arg_col_audit_ts,
    })
  }
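
For reference, the well-formedness step above is the standard offline memory-checking identity. With the challenge-derived hash used in hash_func (a sketch; INIT/WS are the init and write multisets, RS/FINAL the read and audit multisets hashed above),

\[ H_{\gamma_1,\gamma_2}(a, v, t) \;=\; \gamma_1^2\, t + \gamma_1\, v + a - \gamma_2, \qquad \prod_{\mathrm{INIT}} H \cdot \prod_{\mathrm{WS}} H \;\stackrel{?}{=}\; \prod_{\mathrm{RS}} H \cdot \prod_{\mathrm{FINAL}} H, \]

and the three ProductArgumentBatched instances reduce these grand products to the evaluation claims that are opened at the end of prove.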

  pub fn verify(
    &self,
    vk_ee: &EE::VerifierKey,
    comm: &SparsePolynomialCommitment<G>,
    r: &(&[G::Scalar], &[G::Scalar]),
    transcript: &mut G::TE,
  ) -> Result<G::Scalar, NovaError> {
    let (r_x, r_y) = r;

    // absorb the commitments to the oracles and the claimed evaluation
    transcript.absorb(b"E", &vec![self.comm_E_row, self.comm_E_col].as_slice());
    transcript.absorb(b"e", &self.eval);

    // (1) verify the correct evaluation of the sparse polynomial
    let (claim_eval_final, r_eval) = self.sc_proof_eval.verify(
      self.eval,
      comm.size.next_power_of_two().log_2(),
      3,
      transcript,
    )?;
    // verify the last step of the sum-check
    if claim_eval_final != self.eval_E_row * self.eval_E_col * self.eval_val {
      return Err(NovaError::InvalidSumcheckProof);
    }

    // verify evaluations of E_row, E_col and val at r_eval
    let rho = transcript.squeeze(b"r")?;
    let comm_joint = self.comm_E_row + self.comm_E_col * rho + comm.comm_val * rho * rho;
    let eval_joint = self.eval_E_row + rho * self.eval_E_col + rho * rho * self.eval_val;
    EE::verify(
      vk_ee,
      transcript,
      &comm_joint,
      &r_eval,
      &eval_joint,
      &self.arg_eval,
    )?;

    // (2) verify that E_row and E_col are well-formed
    let gamma_1 = transcript.squeeze(b"g1")?;
    let gamma_2 = transcript.squeeze(b"g2")?;

    // hash function
    let gamma_1_sqr = gamma_1 * gamma_1;
    let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar {
      (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2
    };

    // check the required multiset relationship
    // row
    if self.eval_init_row * self.eval_write_row != self.eval_read_row * self.eval_audit_row {
      return Err(NovaError::InvalidMultisetProof);
    }
    // col
    if self.eval_init_col * self.eval_write_col != self.eval_read_col * self.eval_audit_col {
      return Err(NovaError::InvalidMultisetProof);
    }

    // verify the product proofs
    let (claim_init_audit_row, r_init_audit_row) = self.sc_prod_init_audit_row.verify(
      &[self.eval_init_row, self.eval_audit_row],
      comm.ell.0.pow2(),
      transcript,
    )?;
    let (claim_init_audit_col, r_init_audit_col) = self.sc_prod_init_audit_col.verify(
      &[self.eval_init_col, self.eval_audit_col],
      comm.ell.1.pow2(),
      transcript,
    )?;
    let (claim_read_write_row_col, r_read_write_row_col) = self.sc_prod_read_write_row_col.verify(
      &[
        self.eval_read_row,
        self.eval_write_row,
        self.eval_read_col,
        self.eval_write_col,
      ],
      comm.size,
      transcript,
    )?;

    // finish the final step of the three sum-checks
    let (claim_init_expected_row, claim_audit_expected_row) = {
      let addr = IdentityPolynomial::new(r_init_audit_row.len()).evaluate(&r_init_audit_row);
      let val = EqPolynomial::new(r_x.to_vec()).evaluate(&r_init_audit_row);

      (
        hash_func(&addr, &val, &G::Scalar::zero()),
        hash_func(&addr, &val, &self.eval_row_audit_ts),
      )
    };

    let (claim_read_expected_row, claim_write_expected_row) = {
      (
        hash_func(&self.eval_row, &self.eval_E_row2, &self.eval_row_read_ts),
        hash_func(
          &self.eval_row,
          &self.eval_E_row2,
          &(self.eval_row_read_ts + G::Scalar::one()),
        ),
      )
    };

    // multiset check for the row
    if claim_init_expected_row != claim_init_audit_row[0]
      || claim_audit_expected_row != claim_init_audit_row[1]
      || claim_read_expected_row != claim_read_write_row_col[0]
      || claim_write_expected_row != claim_read_write_row_col[1]
    {
      return Err(NovaError::InvalidSumcheckProof);
    }

    let (claim_init_expected_col, claim_audit_expected_col) = {
      let addr = IdentityPolynomial::new(r_init_audit_col.len()).evaluate(&r_init_audit_col);
      let val = EqPolynomial::new(r_y.to_vec()).evaluate(&r_init_audit_col);

      (
        hash_func(&addr, &val, &G::Scalar::zero()),
        hash_func(&addr, &val, &self.eval_col_audit_ts),
      )
    };

    let (claim_read_expected_col, claim_write_expected_col) = {
      (
        hash_func(&self.eval_col, &self.eval_E_col2, &self.eval_col_read_ts),
        hash_func(
          &self.eval_col,
          &self.eval_E_col2,
          &(self.eval_col_read_ts + G::Scalar::one()),
        ),
      )
    };

    // multiset check for the col
    if claim_init_expected_col != claim_init_audit_col[0]
      || claim_audit_expected_col != claim_init_audit_col[1]
      || claim_read_expected_col != claim_read_write_row_col[2]
      || claim_write_expected_col != claim_read_write_row_col[3]
    {
      return Err(NovaError::InvalidSumcheckProof);
    }

    transcript.absorb(
      b"e",
      &[
        self.eval_row,
        self.eval_row_read_ts,
        self.eval_E_row2,
        self.eval_col,
        self.eval_col_read_ts,
        self.eval_E_col2,
      ]
      .as_slice(),
    );
    let c = transcript.squeeze(b"c")?;
    let eval_joint = self.eval_row
      + c * self.eval_row_read_ts
      + c * c * self.eval_E_row2
      + c * c * c * self.eval_col
      + c * c * c * c * self.eval_col_read_ts
      + c * c * c * c * c * self.eval_E_col2;
    let comm_joint = comm.comm_row
      + comm.comm_row_read_ts * c
      + self.comm_E_row * c * c
      + comm.comm_col * c * c * c
      + comm.comm_col_read_ts * c * c * c * c
      + self.comm_E_col * c * c * c * c * c;

    EE::verify(
      vk_ee,
      transcript,
      &comm_joint,
      &r_read_write_row_col,
      &eval_joint,
      &self.arg_row_col_joint,
    )?;

    EE::verify(
      vk_ee,
      transcript,
      &comm.comm_row_audit_ts,
      &r_init_audit_row,
      &self.eval_row_audit_ts,
      &self.arg_row_audit_ts,
    )?;

    EE::verify(
      vk_ee,
      transcript,
      &comm.comm_col_audit_ts,
      &r_init_audit_col,
      &self.eval_col_audit_ts,
      &self.arg_col_audit_ts,
    )?;

    Ok(self.eval)
  }
}
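
A detail shared by prove and verify: the six row/col evaluation claims are folded into a single opening via powers of the challenge c, with the corresponding commitments combined homomorphically in the same way (sketch):

\[ e_{\mathrm{joint}} \;=\; \sum_{k=0}^{5} c^{k} \, e_k, \qquad C_{\mathrm{joint}} \;=\; \sum_{k=0}^{5} c^{k} \cdot C_k, \]

where (e_0, ..., e_5) are eval_row, eval_row_read_ts, eval_E_row2, eval_col, eval_col_read_ts, eval_E_col2 and the C_k are the matching commitments; only the two audit-timestamp evaluations still get their own evaluation arguments.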

@@ -8,13 +8,17 @@ use ff::Field;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub(crate) struct SumcheckProof<G: Group> {
  compressed_polys: Vec<CompressedUniPoly<G>>,
}

impl<G: Group> SumcheckProof<G> {
  pub fn new(compressed_polys: Vec<CompressedUniPoly<G>>) -> Self {
    Self { compressed_polys }
  }

  pub fn verify(
    &self,
    claim: G::Scalar,
@@ -302,7 +306,7 @@ pub struct UniPoly<G: Group> {

// ax^2 + bx + c stored as vec![a,c]
// ax^3 + bx^2 + cx + d stored as vec![a,c,d]
#[derive(Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompressedUniPoly<G: Group> {
  coeffs_except_linear_term: Vec<G::Scalar>,
  _p: PhantomData<G>,

@@ -19,7 +19,10 @@ pub trait RelaxedR1CSSNARKTrait<G: Group>:
  type VerifierKey: Send + Sync + Serialize + for<'de> Deserialize<'de>;

  /// Produces the keys for the prover and the verifier
  fn setup(ck: &CommitmentKey<G>, S: &R1CSShape<G>) -> (Self::ProverKey, Self::VerifierKey);
  fn setup(
    ck: &CommitmentKey<G>,
    S: &R1CSShape<G>,
  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>;

  /// Produces a new SNARK for a relaxed R1CS
  fn prove(
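
Since setup is now fallible, callers propagate the error rather than destructuring an infallible pair. A toy illustration of the calling pattern (not Nova's API; the error type and the size check are invented for the example):

// Toy sketch of a fallible setup and `?`-based propagation (illustrative only).
#[derive(Debug)]
enum SetupError {
  TooManyConstraints,
}

struct ProverKey;
struct VerifierKey;

fn setup(num_cons: usize) -> Result<(ProverKey, VerifierKey), SetupError> {
  if num_cons > 1 << 20 {
    return Err(SetupError::TooManyConstraints);
  }
  Ok((ProverKey, VerifierKey))
}

fn make_keys() -> Result<(), SetupError> {
  let (_pk, _vk) = setup(1024)?; // `?` replaces the old infallible destructuring
  Ok(())
}

fn main() {
  make_keys().expect("setup failed");
}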