diff --git a/Cargo.toml b/Cargo.toml index 7e8d0ca..a11e9f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nova-snark" -version = "0.18.2" +version = "0.19.0" authors = ["Srinath Setty "] edition = "2021" description = "Recursive zkSNARKs without trusted setup" diff --git a/benches/compressed-snark.rs b/benches/compressed-snark.rs index 11bf46b..dbde389 100644 --- a/benches/compressed-snark.rs +++ b/benches/compressed-snark.rs @@ -17,8 +17,10 @@ type G1 = pasta_curves::pallas::Point; type G2 = pasta_curves::vesta::Point; type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; -type S1 = nova_snark::spartan::RelaxedR1CSSNARK; -type S2 = nova_snark::spartan::RelaxedR1CSSNARK; +type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine; +type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine; +type S1 = nova_snark::spartan::RelaxedR1CSSNARK; +type S2 = nova_snark::spartan::RelaxedR1CSSNARK; type C1 = NonTrivialTestCircuit<::Scalar>; type C2 = TrivialTestCircuit<::Scalar>; @@ -50,7 +52,7 @@ fn bench_compressed_snark(c: &mut Criterion) { ); // Produce prover and verifier keys for CompressedSNARK - let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); // produce a recursive SNARK let num_steps = 3; diff --git a/examples/minroot.rs b/examples/minroot.rs index 6599fea..233c6f8 100644 --- a/examples/minroot.rs +++ b/examples/minroot.rs @@ -256,13 +256,15 @@ fn main() { // produce a compressed SNARK println!("Generating a CompressedSNARK using Spartan with IPA-PC..."); - let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); let start = Instant::now(); type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; - type S1 = 
nova_snark::spartan::RelaxedR1CSSNARK; - type S2 = nova_snark::spartan::RelaxedR1CSSNARK; + type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine; + type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine; + type S1 = nova_snark::spartan::RelaxedR1CSSNARK; + type S2 = nova_snark::spartan::RelaxedR1CSSNARK; let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark); println!( diff --git a/src/bellperson/mod.rs b/src/bellperson/mod.rs index 666cab8..65450d7 100644 --- a/src/bellperson/mod.rs +++ b/src/bellperson/mod.rs @@ -46,8 +46,7 @@ mod tests { // First create the shape let mut cs: ShapeCS = ShapeCS::new(); let _ = synthesize_alloc_bit(&mut cs); - let shape = cs.r1cs_shape(); - let ck = cs.commitment_key(); + let (shape, ck) = cs.r1cs_shape(); // Now get the assignment let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); diff --git a/src/bellperson/r1cs.rs b/src/bellperson/r1cs.rs index c46892e..85b939a 100644 --- a/src/bellperson/r1cs.rs +++ b/src/bellperson/r1cs.rs @@ -22,12 +22,10 @@ pub trait NovaWitness { ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; } -/// `NovaShape` provides methods for acquiring `R1CSShape` and `R1CSGens` from implementers. +/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` from implementers. pub trait NovaShape { - /// Return an appropriate `R1CSShape` struct. - fn r1cs_shape(&self) -> R1CSShape; - /// Return an appropriate `CommitmentKey` struct. - fn commitment_key(&self) -> CommitmentKey; + /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. 
+ fn r1cs_shape(&self) -> (R1CSShape, CommitmentKey); } impl NovaWitness for SatisfyingAssignment @@ -54,7 +52,7 @@ impl NovaShape for ShapeCS where G::Scalar: PrimeField, { - fn r1cs_shape(&self) -> R1CSShape { + fn r1cs_shape(&self) -> (R1CSShape, CommitmentKey) { let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new(); let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new(); let mut C: Vec<(usize, usize, G::Scalar)> = Vec::new(); @@ -84,11 +82,9 @@ where res.unwrap() }; - S - } + let ck = R1CS::::commitment_key(&S); - fn commitment_key(&self) -> CommitmentKey { - R1CS::::commitment_key(self.num_constraints(), self.num_aux()) + (S, ck) } } diff --git a/src/circuit.rs b/src/circuit.rs index a9e9d04..0b6d52c 100644 --- a/src/circuit.rs +++ b/src/circuit.rs @@ -400,7 +400,7 @@ mod tests { ); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit1.synthesize(&mut cs); - let (shape1, ck1) = (cs.r1cs_shape(), cs.commitment_key()); + let (shape1, ck1) = cs.r1cs_shape(); assert_eq!(cs.num_constraints(), 9815); // Initialize the shape and ck for the secondary @@ -413,7 +413,7 @@ mod tests { ); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit2.synthesize(&mut cs); - let (shape2, ck2) = (cs.r1cs_shape(), cs.commitment_key()); + let (shape2, ck2) = cs.r1cs_shape(); assert_eq!(cs.num_constraints(), 10347); // Execute the base case for the primary diff --git a/src/errors.rs b/src/errors.rs index 8188e46..e925f5b 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -44,4 +44,10 @@ pub enum NovaError { /// returned when the transcript engine encounters an overflow of the round number #[error("InternalTranscriptError")] InternalTranscriptError, + /// returned when the multiset check fails + #[error("InvalidMultisetProof")] + InvalidMultisetProof, + /// returned when the product proof check fails + #[error("InvalidProductProof")] + InvalidProductProof, } diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index 3616f86..4b8adff 100644 --- a/src/gadgets/ecc.rs +++ 
b/src/gadgets/ecc.rs @@ -975,8 +975,7 @@ mod tests { let mut cs: ShapeCS = ShapeCS::new(); let _ = synthesize_smul::(cs.namespace(|| "synthesize")); println!("Number of constraints: {}", cs.num_constraints()); - let shape = cs.r1cs_shape(); - let ck = cs.commitment_key(); + let (shape, ck) = cs.r1cs_shape(); // Then the satisfying assignment let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); @@ -1017,8 +1016,7 @@ mod tests { let mut cs: ShapeCS = ShapeCS::new(); let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); println!("Number of constraints: {}", cs.num_constraints()); - let shape = cs.r1cs_shape(); - let ck = cs.commitment_key(); + let (shape, ck) = cs.r1cs_shape(); // Then the satisfying assignment let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); @@ -1063,8 +1061,7 @@ mod tests { let mut cs: ShapeCS = ShapeCS::new(); let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); println!("Number of constraints: {}", cs.num_constraints()); - let shape = cs.r1cs_shape(); - let ck = cs.commitment_key(); + let (shape, ck) = cs.r1cs_shape(); // Then the satisfying assignment let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); diff --git a/src/lib.rs b/src/lib.rs index 957d11e..6f424a6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -106,7 +106,7 @@ where ); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit_primary.synthesize(&mut cs); - let (r1cs_shape_primary, ck_primary) = (cs.r1cs_shape(), cs.commitment_key()); + let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape(); // Initialize ck for the secondary let circuit_secondary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( @@ -117,7 +117,7 @@ where ); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit_secondary.synthesize(&mut cs); - let (r1cs_shape_secondary, ck_secondary) = (cs.r1cs_shape(), cs.commitment_key()); + let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape(); Self { F_arity_primary, @@ -580,12 +580,15 @@ where /// 
Creates prover and verifier keys for `CompressedSNARK` pub fn setup( pp: &PublicParams, - ) -> ( - ProverKey, - VerifierKey, - ) { - let (pk_primary, vk_primary) = S1::setup(&pp.ck_primary, &pp.r1cs_shape_primary); - let (pk_secondary, vk_secondary) = S2::setup(&pp.ck_secondary, &pp.r1cs_shape_secondary); + ) -> Result< + ( + ProverKey, + VerifierKey, + ), + NovaError, + > { + let (pk_primary, vk_primary) = S1::setup(&pp.ck_primary, &pp.r1cs_shape_primary)?; + let (pk_secondary, vk_secondary) = S2::setup(&pp.ck_secondary, &pp.r1cs_shape_secondary)?; let pk = ProverKey { pk_primary, @@ -607,7 +610,7 @@ where _p_c2: Default::default(), }; - (pk, vk) + Ok((pk, vk)) } /// Create a new `CompressedSNARK` @@ -785,8 +788,10 @@ mod tests { type G2 = pasta_curves::vesta::Point; type EE1 = provider::ipa_pc::EvaluationEngine; type EE2 = provider::ipa_pc::EvaluationEngine; - type S1 = spartan::RelaxedR1CSSNARK; - type S2 = spartan::RelaxedR1CSSNARK; + type CC1 = spartan::spark::TrivialCompComputationEngine; + type CC2 = spartan::spark::TrivialCompComputationEngine; + type S1 = spartan::RelaxedR1CSSNARK; + type S2 = spartan::RelaxedR1CSSNARK; use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; use core::marker::PhantomData; use ff::PrimeField; @@ -1011,7 +1016,7 @@ mod tests { assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); // produce the prover and verifier keys for compressed snark - let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); // produce a compressed SNARK let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark); @@ -1028,6 +1033,91 @@ mod tests { assert!(res.is_ok()); } + #[test] + fn test_ivc_nontrivial_with_spark_compression() { + let circuit_primary = TrivialTestCircuit::default(); + let circuit_secondary = CubicCircuit::default(); + + // produce public parameters + let pp = PublicParams::< + G1, + G2, + 
TrivialTestCircuit<::Scalar>, + CubicCircuit<::Scalar>, + >::setup(circuit_primary.clone(), circuit_secondary.clone()); + + let num_steps = 3; + + // produce a recursive SNARK + let mut recursive_snark: Option< + RecursiveSNARK< + G1, + G2, + TrivialTestCircuit<::Scalar>, + CubicCircuit<::Scalar>, + >, + > = None; + + for _i in 0..num_steps { + let res = RecursiveSNARK::prove_step( + &pp, + recursive_snark, + circuit_primary.clone(), + circuit_secondary.clone(), + vec![::Scalar::one()], + vec![::Scalar::zero()], + ); + assert!(res.is_ok()); + recursive_snark = Some(res.unwrap()); + } + + assert!(recursive_snark.is_some()); + let recursive_snark = recursive_snark.unwrap(); + + // verify the recursive SNARK + let res = recursive_snark.verify( + &pp, + num_steps, + vec![::Scalar::one()], + vec![::Scalar::zero()], + ); + assert!(res.is_ok()); + + let (zn_primary, zn_secondary) = res.unwrap(); + + // sanity: check the claimed output with a direct computation of the same + assert_eq!(zn_primary, vec![::Scalar::one()]); + let mut zn_secondary_direct = vec![::Scalar::zero()]; + for _i in 0..num_steps { + zn_secondary_direct = CubicCircuit::default().output(&zn_secondary_direct); + } + assert_eq!(zn_secondary, zn_secondary_direct); + assert_eq!(zn_secondary, vec![::Scalar::from(2460515u64)]); + + // run the compressed snark with Spark compiler + type CC1Prime = spartan::spark::SparkEngine; + type CC2Prime = spartan::spark::SparkEngine; + type S1Prime = spartan::RelaxedR1CSSNARK; + type S2Prime = spartan::RelaxedR1CSSNARK; + + // produce the prover and verifier keys for compressed snark + let (pk, vk) = CompressedSNARK::<_, _, _, _, S1Prime, S2Prime>::setup(&pp).unwrap(); + + // produce a compressed SNARK + let res = CompressedSNARK::<_, _, _, _, S1Prime, S2Prime>::prove(&pp, &pk, &recursive_snark); + assert!(res.is_ok()); + let compressed_snark = res.unwrap(); + + // verify the compressed SNARK + let res = compressed_snark.verify( + &vk, + num_steps, + 
vec![::Scalar::one()], + vec![::Scalar::zero()], + ); + assert!(res.is_ok()); + } + #[test] fn test_ivc_nondet_with_compression() { // y is a non-deterministic advice representing the fifth root of the input at a step. @@ -1162,7 +1252,7 @@ mod tests { assert!(res.is_ok()); // produce the prover and verifier keys for compressed snark - let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp); + let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); // produce a compressed SNARK let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark); diff --git a/src/nifs.rs b/src/nifs.rs index a2608e5..8c46f58 100644 --- a/src/nifs.rs +++ b/src/nifs.rs @@ -171,8 +171,7 @@ mod tests { // First create the shape let mut cs: ShapeCS = ShapeCS::new(); let _ = synthesize_tiny_r1cs_bellperson(&mut cs, None); - let shape = cs.r1cs_shape(); - let ck = cs.commitment_key(); + let (shape, ck) = cs.r1cs_shape(); let ro_consts = <::RO as ROTrait<::Base, ::Scalar>>::Constants::new(); @@ -305,7 +304,7 @@ mod tests { }; // generate generators and ro constants - let ck = R1CS::::commitment_key(num_cons, num_vars); + let ck = R1CS::::commitment_key(&S); let ro_consts = <::RO as ROTrait<::Base, ::Scalar>>::Constants::new(); diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs index 7aa4f0d..e630d8c 100644 --- a/src/provider/ipa_pc.rs +++ b/src/provider/ipa_pc.rs @@ -190,6 +190,8 @@ where ) -> Result { transcript.dom_sep(Self::protocol_name()); + let (ck, _) = ck.split_at(U.b_vec.len()); + if U.b_vec.len() != W.a_vec.len() { return Err(NovaError::InvalidInputLength); } @@ -272,7 +274,7 @@ where // we create mutable copies of vectors and generators let mut a_vec = W.a_vec.to_vec(); let mut b_vec = U.b_vec.to_vec(); - let mut ck = ck.clone(); + let mut ck = ck; for _i in 0..(U.b_vec.len() as f64).log2() as usize { let (L, R, a_vec_folded, b_vec_folded, ck_folded) = prove_inner(&a_vec, &b_vec, &ck, transcript)?; @@ -300,6 +302,8 @@ where U: 
&InnerProductInstance, transcript: &mut G::TE, ) -> Result<(), NovaError> { + let (ck, _) = ck.split_at(U.b_vec.len()); + transcript.dom_sep(Self::protocol_name()); if U.b_vec.len() != n || n != (1 << self.L_vec.len()) @@ -383,7 +387,7 @@ where }; let ck_hat = { - let c = CE::::commit(ck, &s).compress(); + let c = CE::::commit(&ck, &s).compress(); CommitmentKey::::reinterpret_commitments_as_ck(&[c])? }; diff --git a/src/r1cs.rs b/src/r1cs.rs index cffb443..7e8d12c 100644 --- a/src/r1cs.rs +++ b/src/r1cs.rs @@ -72,8 +72,11 @@ pub struct RelaxedR1CSInstance { impl R1CS { /// Samples public parameters for the specified number of constraints and variables in an R1CS - pub fn commitment_key(num_cons: usize, num_vars: usize) -> CommitmentKey { - G::CE::setup(b"ck", max(num_vars, num_cons)) + pub fn commitment_key(S: &R1CSShape) -> CommitmentKey { + let num_cons = S.num_cons; + let num_vars = S.num_vars; + let num_nz = max(max(S.A.len(), S.B.len()), S.C.len()); + G::CE::setup(b"ck", max(max(num_cons, num_vars), num_nz)) } } diff --git a/src/spartan/math.rs b/src/spartan/math.rs new file mode 100644 index 0000000..ba809d7 --- /dev/null +++ b/src/spartan/math.rs @@ -0,0 +1,30 @@ +pub trait Math { + fn pow2(self) -> usize; + fn get_bits(self, num_bits: usize) -> Vec; + fn log_2(self) -> usize; +} + +impl Math for usize { + #[inline] + fn pow2(self) -> usize { + let base: usize = 2; + base.pow(self as u32) + } + + /// Returns the num_bits from n in a canonical order + fn get_bits(self, num_bits: usize) -> Vec { + (0..num_bits) + .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) + .collect::>() + } + + fn log_2(self) -> usize { + assert_ne!(self, 0); + + if self.is_power_of_two() { + (1usize.leading_zeros() - self.leading_zeros()) as usize + } else { + (0usize.leading_zeros() - self.leading_zeros()) as usize + } + } +} diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index dc1efcb..b6834d2 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ 
-1,6 +1,8 @@ //! This module implements RelaxedR1CSSNARKTrait using Spartan that is generic //! over the polynomial commitment and evaluation argument (i.e., a PCS) -pub mod polynomial; +mod math; +pub(crate) mod polynomial; +pub mod spark; mod sumcheck; use crate::{ @@ -8,6 +10,7 @@ use crate::{ r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, traits::{ evaluation::EvaluationEngineTrait, snark::RelaxedR1CSSNARKTrait, Group, TranscriptEngineTrait, + TranscriptReprTrait, }, CommitmentKey, }; @@ -18,20 +21,75 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sumcheck::SumcheckProof; +/// A trait that defines the behavior of a computation commitment engine +pub trait CompCommitmentEngineTrait> { + /// A type that holds opening hint + type Decommitment: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A type that holds a commitment + type Commitment: Clone + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de>; + + /// A type that holds an evaluation argument + type EvaluationArgument: Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// commits to R1CS matrices + fn commit( + ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::Commitment, Self::Decommitment), NovaError>; + + /// proves an evaluation of R1CS matrices viewed as polynomials + fn prove( + ck: &CommitmentKey, + ek: &EE::ProverKey, + S: &R1CSShape, + decomm: &Self::Decommitment, + comm: &Self::Commitment, + r: &(&[G::Scalar], &[G::Scalar]), + transcript: &mut G::TE, + ) -> Result; + + /// verifies an evaluation of R1CS matrices viewed as polynomials and returns verified evaluations + fn verify( + vk: &EE::VerifierKey, + comm: &Self::Commitment, + r: &(&[G::Scalar], &[G::Scalar]), + arg: &Self::EvaluationArgument, + transcript: &mut G::TE, + ) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError>; +} + /// A type that represents the prover's key #[derive(Serialize, Deserialize)] #[serde(bound = "")] -pub struct 
ProverKey> { +pub struct ProverKey< + G: Group, + EE: EvaluationEngineTrait, + CC: CompCommitmentEngineTrait, +> { pk_ee: EE::ProverKey, S: R1CSShape, + decomm: CC::Decommitment, + comm: CC::Commitment, } /// A type that represents the verifier's key #[derive(Serialize, Deserialize)] #[serde(bound = "")] -pub struct VerifierKey> { +pub struct VerifierKey< + G: Group, + EE: EvaluationEngineTrait, + CC: CompCommitmentEngineTrait, +> { + num_cons: usize, + num_vars: usize, vk_ee: EE::VerifierKey, - S: R1CSShape, + comm: CC::Commitment, } /// A succinct proof of knowledge of a witness to a relaxed R1CS instance @@ -39,7 +97,11 @@ pub struct VerifierKey> { /// the commitment to a vector viewed as a polynomial commitment #[derive(Serialize, Deserialize)] #[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { +pub struct RelaxedR1CSSNARK< + G: Group, + EE: EvaluationEngineTrait, + CC: CompCommitmentEngineTrait, +> { sc_proof_outer: SumcheckProof, claims_outer: (G::Scalar, G::Scalar, G::Scalar), eval_E: G::Scalar, @@ -49,20 +111,40 @@ pub struct RelaxedR1CSSNARK> eval_E_prime: G::Scalar, eval_W_prime: G::Scalar, eval_arg: EE::EvaluationArgument, + eval_arg_cc: CC::EvaluationArgument, } -impl> RelaxedR1CSSNARKTrait - for RelaxedR1CSSNARK +impl, CC: CompCommitmentEngineTrait> + RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; - fn setup(ck: &CommitmentKey, S: &R1CSShape) -> (Self::ProverKey, Self::VerifierKey) { + fn setup( + ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { let (pk_ee, vk_ee) = EE::setup(ck); - let pk = ProverKey { pk_ee, S: S.pad() }; - let vk = VerifierKey { vk_ee, S: S.pad() }; - (pk, vk) + let S = S.pad(); + + let (comm, decomm) = CC::commit(ck, &S)?; + + let vk = VerifierKey { + num_cons: S.num_cons, + num_vars: S.num_vars, + vk_ee, + comm: comm.clone(), + }; + + let pk = ProverKey 
{ + pk_ee, + S, + comm, + decomm, + }; + + Ok((pk, vk)) } /// produces a succinct proof of satisfiability of a RelaxedR1CS instance @@ -81,8 +163,8 @@ impl> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait) -> Result<(), NovaError> { let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); - // append the R1CSShape and RelaxedR1CSInstance to the transcript - transcript.absorb(b"S", &vk.S); + // append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript + transcript.absorb(b"C", &vk.comm); transcript.absorb(b"U", U); let (num_rounds_x, num_rounds_y) = ( - (vk.S.num_cons as f64).log2() as usize, - ((vk.S.num_vars as f64).log2() as usize + 1), + (vk.num_cons as f64).log2() as usize, + ((vk.num_vars as f64).log2() as usize + 1), ); // outer sum-check @@ -333,37 +427,21 @@ impl> RelaxedR1CSSNARKTrait>(), ); - SparsePolynomial::new((vk.S.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..]) + SparsePolynomial::new((vk.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..]) }; (G::Scalar::one() - r_y[0]) * self.eval_W + r_y[0] * eval_X }; - let evaluate_as_sparse_polynomial = |S: &R1CSShape, - r_x: &[G::Scalar], - r_y: &[G::Scalar]| - -> (G::Scalar, G::Scalar, G::Scalar) { - let evaluate_with_table = - |M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar { - (0..M.len()) - .collect::>() - .par_iter() - .map(|&i| { - let (row, col, val) = M[i]; - T_x[row] * T_y[col] * val - }) - .reduce(G::Scalar::zero, |acc, x| acc + x) - }; - - let T_x = EqPolynomial::new(r_x.to_vec()).evals(); - let T_y = EqPolynomial::new(r_y.to_vec()).evals(); - let eval_A_r = evaluate_with_table(&S.A, &T_x, &T_y); - let eval_B_r = evaluate_with_table(&S.B, &T_x, &T_y); - let eval_C_r = evaluate_with_table(&S.C, &T_x, &T_y); - (eval_A_r, eval_B_r, eval_C_r) - }; + // verify evaluation argument to retrieve evaluations of R1CS matrices + let (eval_A, eval_B, eval_C) = 
CC::verify( + &vk.vk_ee, + &vk.comm, + &(&r_x, &r_y), + &self.eval_arg_cc, + &mut transcript, + )?; - let (eval_A_r, eval_B_r, eval_C_r) = evaluate_as_sparse_polynomial(&vk.S, &r_x, &r_y); - let claim_inner_final_expected = (eval_A_r + r * eval_B_r + r * r * eval_C_r) * eval_Z; + let claim_inner_final_expected = (eval_A + r * eval_B + r * r * eval_C) * eval_Z; if claim_inner_final != claim_inner_final_expected { return Err(NovaError::InvalidSumcheckProof); } diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 7706119..2486ece 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -2,6 +2,7 @@ use core::ops::Index; use ff::PrimeField; use rayon::prelude::*; +use serde::{Deserialize, Serialize}; pub(crate) struct EqPolynomial { r: Vec, @@ -45,8 +46,8 @@ impl EqPolynomial { } } -#[derive(Debug)] -pub(crate) struct MultilinearPolynomial { +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MultilinearPolynomial { num_vars: usize, // the number of variables in the multilinear polynomial Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } @@ -97,6 +98,23 @@ impl MultilinearPolynomial { .map(|i| chis[i] * self.Z[i]) .reduce(Scalar::zero, |x, y| x + y) } + + pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { + EqPolynomial::new(r.to_vec()) + .evals() + .into_par_iter() + .zip(Z.into_par_iter()) + .map(|(a, b)| a * b) + .reduce(Scalar::zero, |x, y| x + y) + } + + pub fn split(&self, idx: usize) -> (Self, Self) { + assert!(idx < self.len()); + ( + Self::new(self.Z[..idx].to_vec()), + Self::new(self.Z[idx..2 * idx].to_vec()), + ) + } } impl Index for MultilinearPolynomial { diff --git a/src/spartan/spark/mod.rs b/src/spartan/spark/mod.rs new file mode 100644 index 0000000..af7923d --- /dev/null +++ b/src/spartan/spark/mod.rs @@ -0,0 +1,220 @@ +//! This module implements `CompCommitmentEngineTrait` using Spartan's SPARK compiler +//! 
We also provide a trivial implementation that has the verifier evaluate the sparse polynomials +use crate::{ + errors::NovaError, + r1cs::R1CSShape, + spartan::{math::Math, CompCommitmentEngineTrait}, + traits::{evaluation::EvaluationEngineTrait, Group, TranscriptReprTrait}, + CommitmentKey, +}; +use core::marker::PhantomData; +use serde::{Deserialize, Serialize}; + +/// A trivial implementation of `ComputationCommitmentEngineTrait` +pub struct TrivialCompComputationEngine> { + _p: PhantomData, + _p2: PhantomData, +} + +/// Provides an implementation of a trivial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct TrivialCommitment { + S: R1CSShape, +} + +/// Provides an implementation of a trivial decommitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct TrivialDecommitment { + _p: PhantomData, +} + +/// Provides an implementation of a trivial evaluation argument +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct TrivialEvaluationArgument { + _p: PhantomData, +} + +impl TranscriptReprTrait for TrivialCommitment { + fn to_transcript_bytes(&self) -> Vec { + self.S.to_transcript_bytes() + } +} + +impl> CompCommitmentEngineTrait + for TrivialCompComputationEngine +{ + type Decommitment = TrivialDecommitment; + type Commitment = TrivialCommitment; + type EvaluationArgument = TrivialEvaluationArgument; + + /// commits to R1CS matrices + fn commit( + _ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::Commitment, Self::Decommitment), NovaError> { + Ok(( + TrivialCommitment { S: S.clone() }, + TrivialDecommitment { + _p: Default::default(), + }, + )) + } + + /// proves an evaluation of R1CS matrices viewed as polynomials + fn prove( + _ck: &CommitmentKey, + _ek: &EE::ProverKey, + _S: &R1CSShape, + _decomm: &Self::Decommitment, + _comm: &Self::Commitment, + _r: &(&[G::Scalar], &[G::Scalar]), + _transcript: &mut G::TE, + ) -> Result { + 
Ok(TrivialEvaluationArgument { + _p: Default::default(), + }) + } + + /// verifies an evaluation of R1CS matrices viewed as polynomials + fn verify( + _vk: &EE::VerifierKey, + comm: &Self::Commitment, + r: &(&[G::Scalar], &[G::Scalar]), + _arg: &Self::EvaluationArgument, + _transcript: &mut G::TE, + ) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError> { + let (r_x, r_y) = r; + let evals = SparsePolynomial::::multi_evaluate(&[&comm.S.A, &comm.S.B, &comm.S.C], r_x, r_y); + Ok((evals[0], evals[1], evals[2])) + } +} + +mod product; +mod sparse; + +use sparse::{SparseEvaluationArgument, SparsePolynomial, SparsePolynomialCommitment}; + +/// A non-trivial implementation of `CompCommitmentEngineTrait` using Spartan's SPARK compiler +pub struct SparkEngine> { + _p: PhantomData, + _p2: PhantomData, +} + +/// An implementation of Spark decommitment +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparkDecommitment { + A: SparsePolynomial, + B: SparsePolynomial, + C: SparsePolynomial, +} + +impl SparkDecommitment { + fn new(S: &R1CSShape) -> Self { + let ell = (S.num_cons.log_2(), S.num_vars.log_2() + 1); + let A = SparsePolynomial::new(ell, &S.A); + let B = SparsePolynomial::new(ell, &S.B); + let C = SparsePolynomial::new(ell, &S.C); + + Self { A, B, C } + } + + fn commit(&self, ck: &CommitmentKey) -> SparkCommitment { + let comm_A = self.A.commit(ck); + let comm_B = self.B.commit(ck); + let comm_C = self.C.commit(ck); + + SparkCommitment { + comm_A, + comm_B, + comm_C, + } + } +} + +/// An implementation of Spark commitment +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparkCommitment { + comm_A: SparsePolynomialCommitment, + comm_B: SparsePolynomialCommitment, + comm_C: SparsePolynomialCommitment, +} + +impl TranscriptReprTrait for SparkCommitment { + fn to_transcript_bytes(&self) -> Vec { + let mut bytes = self.comm_A.to_transcript_bytes(); + bytes.extend(self.comm_B.to_transcript_bytes()); + 
bytes.extend(self.comm_C.to_transcript_bytes()); + bytes + } +} + +/// Provides an implementation of a trivial evaluation argument +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparkEvaluationArgument> { + arg_A: SparseEvaluationArgument, + arg_B: SparseEvaluationArgument, + arg_C: SparseEvaluationArgument, +} + +impl> CompCommitmentEngineTrait + for SparkEngine +{ + type Decommitment = SparkDecommitment; + type Commitment = SparkCommitment; + type EvaluationArgument = SparkEvaluationArgument; + + /// commits to R1CS matrices + fn commit( + ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::Commitment, Self::Decommitment), NovaError> { + let sparse = SparkDecommitment::new(S); + let comm = sparse.commit(ck); + Ok((comm, sparse)) + } + + /// proves an evaluation of R1CS matrices viewed as polynomials + fn prove( + ck: &CommitmentKey, + pk_ee: &EE::ProverKey, + S: &R1CSShape, + decomm: &Self::Decommitment, + comm: &Self::Commitment, + r: &(&[G::Scalar], &[G::Scalar]), + transcript: &mut G::TE, + ) -> Result { + let arg_A = + SparseEvaluationArgument::prove(ck, pk_ee, &decomm.A, &S.A, &comm.comm_A, r, transcript)?; + let arg_B = + SparseEvaluationArgument::prove(ck, pk_ee, &decomm.B, &S.B, &comm.comm_B, r, transcript)?; + let arg_C = + SparseEvaluationArgument::prove(ck, pk_ee, &decomm.C, &S.C, &comm.comm_C, r, transcript)?; + + Ok(SparkEvaluationArgument { + arg_A, + arg_B, + arg_C, + }) + } + + /// verifies an evaluation of R1CS matrices viewed as polynomials + fn verify( + vk_ee: &EE::VerifierKey, + comm: &Self::Commitment, + r: &(&[G::Scalar], &[G::Scalar]), + arg: &Self::EvaluationArgument, + transcript: &mut G::TE, + ) -> Result<(G::Scalar, G::Scalar, G::Scalar), NovaError> { + let eval_A = arg.arg_A.verify(vk_ee, &comm.comm_A, r, transcript)?; + let eval_B = arg.arg_B.verify(vk_ee, &comm.comm_B, r, transcript)?; + let eval_C = arg.arg_C.verify(vk_ee, &comm.comm_C, r, transcript)?; + + Ok((eval_A, eval_B, eval_C)) + } +} diff 
--git a/src/spartan/spark/product.rs b/src/spartan/spark/product.rs new file mode 100644 index 0000000..6967ce0 --- /dev/null +++ b/src/spartan/spark/product.rs @@ -0,0 +1,477 @@ +use crate::{ + errors::NovaError, + spartan::{ + math::Math, + polynomial::{EqPolynomial, MultilinearPolynomial}, + sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly}, + }, + traits::{Group, TranscriptEngineTrait}, +}; +use core::marker::PhantomData; +use ff::{Field, PrimeField}; +use serde::{Deserialize, Serialize}; + +pub(crate) struct IdentityPolynomial { + ell: usize, + _p: PhantomData, +} + +impl IdentityPolynomial { + pub fn new(ell: usize) -> Self { + IdentityPolynomial { + ell, + _p: Default::default(), + } + } + + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.ell, r.len()); + (0..self.ell) + .map(|i| Scalar::from(2_usize.pow((self.ell - i - 1) as u32) as u64) * r[i]) + .fold(Scalar::zero(), |acc, item| acc + item) + } +} + +impl SumcheckProof { + pub fn prove_cubic( + claim: &G::Scalar, + num_rounds: usize, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + poly_C: &mut MultilinearPolynomial, + comb_func: F, + transcript: &mut G::TE, + ) -> Result<(Self, Vec, Vec), NovaError> + where + F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar, + { + let mut e = *claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + for _j in 0..num_rounds { + let mut eval_point_0 = G::Scalar::zero(); + let mut eval_point_2 = G::Scalar::zero(); + let mut eval_point_3 = G::Scalar::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += 
comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + + let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + //derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // bound all tables to the verifier's challenege + poly_A.bound_poly_var_top(&r_i); + poly_B.bound_poly_var_top(&r_i); + poly_C.bound_poly_var_top(&r_i); + e = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + Ok(( + Self::new(cubic_polys), + r, + vec![poly_A[0], poly_B[0], poly_C[0]], + )) + } + + pub fn prove_cubic_batched( + claim: &G::Scalar, + num_rounds: usize, + poly_vec: ( + &mut Vec<&mut MultilinearPolynomial>, + &mut Vec<&mut MultilinearPolynomial>, + &mut MultilinearPolynomial, + ), + coeffs: &[G::Scalar], + comb_func: F, + transcript: &mut G::TE, + ) -> Result< + ( + Self, + Vec, + (Vec, Vec, G::Scalar), + ), + NovaError, + > + where + F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar, + { + let (poly_A_vec, poly_B_vec, poly_C) = poly_vec; + + let mut e = *claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + + for _j in 0..num_rounds { + let mut evals: Vec<(G::Scalar, G::Scalar, G::Scalar)> = Vec::new(); + + for (poly_A, poly_B) in poly_A_vec.iter().zip(poly_B_vec.iter()) { + let mut eval_point_0 = G::Scalar::zero(); + let mut eval_point_2 = G::Scalar::zero(); + 
let mut eval_point_3 = G::Scalar::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + + evals.push((eval_point_0, eval_point_2, eval_point_3)); + } + + let evals_combined_0 = (0..evals.len()) + .map(|i| evals[i].0 * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let evals_combined_2 = (0..evals.len()) + .map(|i| evals[i].1 * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let evals_combined_3 = (0..evals.len()) + .map(|i| evals[i].2 * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let evals = vec![ + evals_combined_0, + e - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // bound all tables to the verifier's challenge + for (poly_A, poly_B) in poly_A_vec.iter_mut().zip(poly_B_vec.iter_mut()) { + poly_A.bound_poly_var_top(&r_i); + 
poly_B.bound_poly_var_top(&r_i); + } + poly_C.bound_poly_var_top(&r_i); + + e = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + let poly_A_final = (0..poly_A_vec.len()).map(|i| poly_A_vec[i][0]).collect(); + let poly_B_final = (0..poly_B_vec.len()).map(|i| poly_B_vec[i][0]).collect(); + let claims_prod = (poly_A_final, poly_B_final, poly_C[0]); + + Ok((SumcheckProof::new(cubic_polys), r, claims_prod)) + } +} + +#[derive(Debug)] +pub struct ProductArgumentInputs { + left_vec: Vec>, + right_vec: Vec>, +} + +impl ProductArgumentInputs { + fn compute_layer( + inp_left: &MultilinearPolynomial, + inp_right: &MultilinearPolynomial, + ) -> ( + MultilinearPolynomial, + MultilinearPolynomial, + ) { + let len = inp_left.len() + inp_right.len(); + let outp_left = (0..len / 4) + .map(|i| inp_left[i] * inp_right[i]) + .collect::>(); + let outp_right = (len / 4..len / 2) + .map(|i| inp_left[i] * inp_right[i]) + .collect::>(); + + ( + MultilinearPolynomial::new(outp_left), + MultilinearPolynomial::new(outp_right), + ) + } + + pub fn new(poly: &MultilinearPolynomial) -> Self { + let mut left_vec: Vec> = Vec::new(); + let mut right_vec: Vec> = Vec::new(); + let num_layers = poly.len().log_2(); + let (outp_left, outp_right) = poly.split(poly.len() / 2); + + left_vec.push(outp_left); + right_vec.push(outp_right); + + for i in 0..num_layers - 1 { + let (outp_left, outp_right) = + ProductArgumentInputs::::compute_layer(&left_vec[i], &right_vec[i]); + left_vec.push(outp_left); + right_vec.push(outp_right); + } + + Self { + left_vec, + right_vec, + } + } + + pub fn evaluate(&self) -> G::Scalar { + let len = self.left_vec.len(); + assert_eq!(self.left_vec[len - 1].get_num_vars(), 0); + assert_eq!(self.right_vec[len - 1].get_num_vars(), 0); + self.left_vec[len - 1][0] * self.right_vec[len - 1][0] + } +} +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct LayerProofBatched { + proof: SumcheckProof, + claims_prod_left: Vec, + claims_prod_right: 
Vec, +} + +impl LayerProofBatched { + pub fn verify( + &self, + claim: G::Scalar, + num_rounds: usize, + degree_bound: usize, + transcript: &mut G::TE, + ) -> Result<(G::Scalar, Vec), NovaError> { + self + .proof + .verify(claim, num_rounds, degree_bound, transcript) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub(crate) struct ProductArgumentBatched { + proof: Vec>, +} + +impl ProductArgumentBatched { + pub fn prove( + poly_vec: &[&MultilinearPolynomial], + transcript: &mut G::TE, + ) -> Result<(Self, Vec, Vec), NovaError> { + let mut prod_circuit_vec: Vec<_> = (0..poly_vec.len()) + .map(|i| ProductArgumentInputs::::new(poly_vec[i])) + .collect(); + + let mut proof_layers: Vec> = Vec::new(); + let num_layers = prod_circuit_vec[0].left_vec.len(); + let evals = (0..prod_circuit_vec.len()) + .map(|i| prod_circuit_vec[i].evaluate()) + .collect::>(); + + let mut claims_to_verify = evals.clone(); + let mut rand = Vec::new(); + for layer_id in (0..num_layers).rev() { + let len = prod_circuit_vec[0].left_vec[layer_id].len() + + prod_circuit_vec[0].right_vec[layer_id].len(); + + let mut poly_C = MultilinearPolynomial::new(EqPolynomial::new(rand.clone()).evals()); + assert_eq!(poly_C.len(), len / 2); + + let num_rounds_prod = poly_C.len().log_2(); + let comb_func_prod = |poly_A_comp: &G::Scalar, + poly_B_comp: &G::Scalar, + poly_C_comp: &G::Scalar| + -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + + let mut poly_A_batched: Vec<&mut MultilinearPolynomial> = Vec::new(); + let mut poly_B_batched: Vec<&mut MultilinearPolynomial> = Vec::new(); + for prod_circuit in prod_circuit_vec.iter_mut() { + poly_A_batched.push(&mut prod_circuit.left_vec[layer_id]); + poly_B_batched.push(&mut prod_circuit.right_vec[layer_id]) + } + let poly_vec = (&mut poly_A_batched, &mut poly_B_batched, &mut poly_C); + + // produce a fresh set of coeffs and a joint claim + let coeff_vec = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; 
+ for i in 1..claims_to_verify.len() { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + let claim = (0..claims_to_verify.len()) + .map(|i| claims_to_verify[i] * coeff_vec[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let (proof, rand_prod, claims_prod) = SumcheckProof::prove_cubic_batched( + &claim, + num_rounds_prod, + poly_vec, + &coeff_vec, + comb_func_prod, + transcript, + )?; + + let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod; + + let v = { + let mut v = claims_prod_left.clone(); + v.extend(&claims_prod_right); + v + }; + transcript.absorb(b"p", &v.as_slice()); + + // produce a random challenge to condense two claims into a single claim + let r_layer = transcript.squeeze(b"c")?; + + claims_to_verify = (0..prod_circuit_vec.len()) + .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) + .collect::>(); + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; + + proof_layers.push(LayerProofBatched { + proof, + claims_prod_left, + claims_prod_right, + }); + } + + Ok(( + ProductArgumentBatched { + proof: proof_layers, + }, + evals, + rand, + )) + } + + pub fn verify( + &self, + claims_prod_vec: &[G::Scalar], + len: usize, + transcript: &mut G::TE, + ) -> Result<(Vec, Vec), NovaError> { + let num_layers = len.log_2(); + + let mut rand: Vec = Vec::new(); + if self.proof.len() != num_layers { + return Err(NovaError::InvalidProductProof); + } + + let mut claims_to_verify = claims_prod_vec.to_owned(); + for (num_rounds, i) in (0..num_layers).enumerate() { + // produce random coefficients, one for each instance + let coeff_vec = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; + for i in 1..claims_to_verify.len() { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + // produce a joint claim + let claim = (0..claims_to_verify.len()) + .map(|i| claims_to_verify[i] * coeff_vec[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let (claim_last, rand_prod) = 
self.proof[i].verify(claim, num_rounds, 3, transcript)?; + + let claims_prod_left = &self.proof[i].claims_prod_left; + let claims_prod_right = &self.proof[i].claims_prod_right; + if claims_prod_left.len() != claims_prod_vec.len() + || claims_prod_right.len() != claims_prod_vec.len() + { + return Err(NovaError::InvalidProductProof); + } + + let v = { + let mut v = claims_prod_left.clone(); + v.extend(claims_prod_right); + v + }; + transcript.absorb(b"p", &v.as_slice()); + + if rand.len() != rand_prod.len() { + return Err(NovaError::InvalidProductProof); + } + + let eq: G::Scalar = (0..rand.len()) + .map(|i| { + rand[i] * rand_prod[i] + (G::Scalar::one() - rand[i]) * (G::Scalar::one() - rand_prod[i]) + }) + .fold(G::Scalar::one(), |acc, item| acc * item); + let claim_expected: G::Scalar = (0..claims_prod_vec.len()) + .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq)) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + if claim_expected != claim_last { + return Err(NovaError::InvalidProductProof); + } + + // produce a random challenge + let r_layer = transcript.squeeze(b"c")?; + + claims_to_verify = (0..claims_prod_left.len()) + .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) + .collect::>(); + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; + } + Ok((claims_to_verify, rand)) + } +} diff --git a/src/spartan/spark/sparse.rs b/src/spartan/spark/sparse.rs new file mode 100644 index 0000000..2f1d597 --- /dev/null +++ b/src/spartan/spark/sparse.rs @@ -0,0 +1,732 @@ +#![allow(clippy::type_complexity)] +#![allow(clippy::too_many_arguments)] +#![allow(clippy::needless_range_loop)] +use crate::{ + errors::NovaError, + spartan::{ + math::Math, + polynomial::{EqPolynomial, MultilinearPolynomial}, + spark::product::{IdentityPolynomial, ProductArgumentBatched}, + SumcheckProof, + }, + traits::{ + commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Group, + 
TranscriptEngineTrait, TranscriptReprTrait, + }, + Commitment, CommitmentKey, +}; +use ff::Field; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +/// A type that holds a sparse polynomial in dense representation +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparsePolynomial { + ell: (usize, usize), // number of variables in each dimension + + // dense representation + row: Vec, + col: Vec, + val: Vec, + + // timestamp polynomials + row_read_ts: Vec, + row_audit_ts: Vec, + col_read_ts: Vec, + col_audit_ts: Vec, +} + +/// A type that holds a commitment to a sparse polynomial +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparsePolynomialCommitment { + ell: (usize, usize), // number of variables + size: usize, // size of the dense representation + + // commitments to the dense representation + comm_row: Commitment, + comm_col: Commitment, + comm_val: Commitment, + + // commitments to the timestamp polynomials + comm_row_read_ts: Commitment, + comm_row_audit_ts: Commitment, + comm_col_read_ts: Commitment, + comm_col_audit_ts: Commitment, +} + +impl TranscriptReprTrait for SparsePolynomialCommitment { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_row, + self.comm_col, + self.comm_val, + self.comm_row_read_ts, + self.comm_row_audit_ts, + self.comm_col_read_ts, + self.comm_col_audit_ts, + ] + .as_slice() + .to_transcript_bytes() + } +} + +impl SparsePolynomial { + pub fn new(ell: (usize, usize), M: &[(usize, usize, G::Scalar)]) -> Self { + let mut row = M.iter().map(|(r, _, _)| *r).collect::>(); + let mut col = M.iter().map(|(_, c, _)| *c).collect::>(); + let mut val = M.iter().map(|(_, _, v)| *v).collect::>(); + + let num_ops = M.len().next_power_of_two(); + let num_cells_row = ell.0.pow2(); + let num_cells_col = ell.1.pow2(); + row.resize(num_ops, 0usize); + col.resize(num_ops, 0usize); + val.resize(num_ops, G::Scalar::zero()); + + // timestamp calculation routine + let timestamp_calc 
= + |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> (Vec, Vec) { + let mut read_ts = vec![0usize; num_ops]; + let mut audit_ts = vec![0usize; num_cells]; + + assert!(num_ops >= addr_trace.len()); + for i in 0..addr_trace.len() { + let addr = addr_trace[i]; + assert!(addr < num_cells); + let r_ts = audit_ts[addr]; + read_ts[i] = r_ts; + + let w_ts = r_ts + 1; + audit_ts[addr] = w_ts; + } + (read_ts, audit_ts) + }; + + // timestamp polynomials for row + let (row_read_ts, row_audit_ts) = timestamp_calc(num_ops, num_cells_row, &row); + let (col_read_ts, col_audit_ts) = timestamp_calc(num_ops, num_cells_col, &col); + + let to_vec_scalar = |v: &[usize]| -> Vec { + (0..v.len()) + .map(|i| G::Scalar::from(v[i] as u64)) + .collect::>() + }; + + Self { + ell, + // dense representation + row: to_vec_scalar(&row), + col: to_vec_scalar(&col), + val, + + // timestamp polynomials + row_read_ts: to_vec_scalar(&row_read_ts), + row_audit_ts: to_vec_scalar(&row_audit_ts), + col_read_ts: to_vec_scalar(&col_read_ts), + col_audit_ts: to_vec_scalar(&col_audit_ts), + } + } + + pub fn commit(&self, ck: &CommitmentKey) -> SparsePolynomialCommitment { + let comm_vec: Vec> = [ + &self.row, + &self.col, + &self.val, + &self.row_read_ts, + &self.row_audit_ts, + &self.col_read_ts, + &self.col_audit_ts, + ] + .par_iter() + .map(|v| G::CE::commit(ck, v)) + .collect(); + + SparsePolynomialCommitment { + ell: self.ell, + size: self.row.len(), + comm_row: comm_vec[0], + comm_col: comm_vec[1], + comm_val: comm_vec[2], + comm_row_read_ts: comm_vec[3], + comm_row_audit_ts: comm_vec[4], + comm_col_read_ts: comm_vec[5], + comm_col_audit_ts: comm_vec[6], + } + } + + pub fn multi_evaluate( + M_vec: &[&[(usize, usize, G::Scalar)]], + r_x: &[G::Scalar], + r_y: &[G::Scalar], + ) -> Vec { + let evaluate_with_table = + |M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar { + (0..M.len()) + .collect::>() + .par_iter() + .map(|&i| { + let (row, col, val) = M[i]; + 
T_x[row] * T_y[col] * val + }) + .reduce(G::Scalar::zero, |acc, x| acc + x) + }; + + let (T_x, T_y) = rayon::join( + || EqPolynomial::new(r_x.to_vec()).evals(), + || EqPolynomial::new(r_y.to_vec()).evals(), + ); + + (0..M_vec.len()) + .collect::>() + .par_iter() + .map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y)) + .collect() + } + + fn evaluation_oracles( + M: &[(usize, usize, G::Scalar)], + r_x: &[G::Scalar], + r_y: &[G::Scalar], + ) -> ( + Vec, + Vec, + Vec, + Vec, + ) { + let evaluation_oracles_with_table = |M: &[(usize, usize, G::Scalar)], + T_x: &[G::Scalar], + T_y: &[G::Scalar]| + -> (Vec, Vec) { + (0..M.len()) + .collect::>() + .par_iter() + .map(|&i| { + let (row, col, _val) = M[i]; + (T_x[row], T_y[col]) + }) + .collect::>() + .into_par_iter() + .unzip() + }; + + let (T_x, T_y) = rayon::join( + || EqPolynomial::new(r_x.to_vec()).evals(), + || EqPolynomial::new(r_y.to_vec()).evals(), + ); + + let (mut E_row, mut E_col) = evaluation_oracles_with_table(M, &T_x, &T_y); + + // resize the returned vectors + E_row.resize(M.len().next_power_of_two(), T_x[0]); // we place T_x[0] since resized row is appended with 0s + E_col.resize(M.len().next_power_of_two(), T_y[0]); + (E_row, E_col, T_x, T_y) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparseEvaluationArgument> { + // claimed evaluation + eval: G::Scalar, + + // oracles + comm_E_row: Commitment, + comm_E_col: Commitment, + + // proof of correct evaluation wrt oracles + sc_proof_eval: SumcheckProof, + eval_E_row: G::Scalar, + eval_E_col: G::Scalar, + eval_val: G::Scalar, + arg_eval: EE::EvaluationArgument, + + // proof that E_row is well-formed + eval_init_row: G::Scalar, + eval_read_row: G::Scalar, + eval_write_row: G::Scalar, + eval_audit_row: G::Scalar, + eval_init_col: G::Scalar, + eval_read_col: G::Scalar, + eval_write_col: G::Scalar, + eval_audit_col: G::Scalar, + sc_prod_init_audit_row: ProductArgumentBatched, + sc_prod_read_write_row_col: 
ProductArgumentBatched, + sc_prod_init_audit_col: ProductArgumentBatched, + eval_row: G::Scalar, + eval_row_read_ts: G::Scalar, + eval_E_row2: G::Scalar, + eval_row_audit_ts: G::Scalar, + eval_col: G::Scalar, + eval_col_read_ts: G::Scalar, + eval_E_col2: G::Scalar, + eval_col_audit_ts: G::Scalar, + arg_row_col_joint: EE::EvaluationArgument, + arg_row_audit_ts: EE::EvaluationArgument, + arg_col_audit_ts: EE::EvaluationArgument, +} + +impl> SparseEvaluationArgument { + pub fn prove( + ck: &CommitmentKey, + pk_ee: &EE::ProverKey, + poly: &SparsePolynomial, + sparse: &[(usize, usize, G::Scalar)], + comm: &SparsePolynomialCommitment, + r: &(&[G::Scalar], &[G::Scalar]), + transcript: &mut G::TE, + ) -> Result { + let (r_x, r_y) = r; + let eval = SparsePolynomial::::multi_evaluate(&[sparse], r_x, r_y)[0]; + + // compute oracles to prove the correctness of `eval` + let (E_row, E_col, T_x, T_y) = SparsePolynomial::::evaluation_oracles(sparse, r_x, r_y); + let val = poly.val.clone(); + + // commit to the two oracles + let comm_E_row = G::CE::commit(ck, &E_row); + let comm_E_col = G::CE::commit(ck, &E_col); + + // absorb the commitments and the claimed evaluation + transcript.absorb(b"E", &vec![comm_E_row, comm_E_col].as_slice()); + transcript.absorb(b"e", &eval); + + let comb_func_eval = |poly_A_comp: &G::Scalar, + poly_B_comp: &G::Scalar, + poly_C_comp: &G::Scalar| + -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + let (sc_proof_eval, r_eval, claims_eval) = SumcheckProof::::prove_cubic( + &eval, + E_row.len().log_2(), // number of rounds + &mut MultilinearPolynomial::new(E_row.clone()), + &mut MultilinearPolynomial::new(E_col.clone()), + &mut MultilinearPolynomial::new(val.clone()), + comb_func_eval, + transcript, + )?; + + // prove evaluations of E_row, E_col and val at r_eval + let rho = transcript.squeeze(b"r")?; + let comm_joint = comm_E_row + comm_E_col * rho + comm.comm_val * rho * rho; + let eval_joint = claims_eval[0] + rho * claims_eval[1] + rho * rho 
* claims_eval[2]; + let poly_eval = E_row + .iter() + .zip(E_col.iter()) + .zip(val.iter()) + .map(|((a, b), c)| *a + rho * *b + rho * rho * *c) + .collect::>(); + let arg_eval = EE::prove( + ck, + pk_ee, + transcript, + &comm_joint, + &poly_eval, + &r_eval, + &eval_joint, + )?; + + // we now need to prove that E_row and E_col are well-formed + // we use memory checking: H(INIT) * H(WS) =? H(RS) * H(FINAL) + let gamma_1 = transcript.squeeze(b"g1")?; + let gamma_2 = transcript.squeeze(b"g2")?; + + let gamma_1_sqr = gamma_1 * gamma_1; + let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { + (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 + }; + + let init_row = (0..T_x.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &G::Scalar::zero())) + .collect::>(); + let read_row = (0..E_row.len()) + .map(|i| hash_func(&poly.row[i], &E_row[i], &poly.row_read_ts[i])) + .collect::>(); + let write_row = (0..E_row.len()) + .map(|i| { + hash_func( + &poly.row[i], + &E_row[i], + &(poly.row_read_ts[i] + G::Scalar::one()), + ) + }) + .collect::>(); + let audit_row = (0..T_x.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &poly.row_audit_ts[i])) + .collect::>(); + let init_col = (0..T_y.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &G::Scalar::zero())) + .collect::>(); + let read_col = (0..E_col.len()) + .map(|i| hash_func(&poly.col[i], &E_col[i], &poly.col_read_ts[i])) + .collect::>(); + let write_col = (0..E_col.len()) + .map(|i| { + hash_func( + &poly.col[i], + &E_col[i], + &(poly.col_read_ts[i] + G::Scalar::one()), + ) + }) + .collect::>(); + let audit_col = (0..T_y.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &poly.col_audit_ts[i])) + .collect::>(); + + let (sc_prod_init_audit_row, eval_init_audit_row, r_init_audit_row) = + ProductArgumentBatched::prove( + &[ + &MultilinearPolynomial::new(init_row), + &MultilinearPolynomial::new(audit_row), + ], + transcript, + )?; + + 
assert_eq!(init_col.len(), audit_col.len()); + let (sc_prod_init_audit_col, eval_init_audit_col, r_init_audit_col) = + ProductArgumentBatched::prove( + &[ + &MultilinearPolynomial::new(init_col), + &MultilinearPolynomial::new(audit_col), + ], + transcript, + )?; + + assert_eq!(read_row.len(), write_row.len()); + assert_eq!(read_row.len(), read_col.len()); + assert_eq!(read_row.len(), write_col.len()); + + let (sc_prod_read_write_row_col, eval_read_write_row_col, r_read_write_row_col) = + ProductArgumentBatched::prove( + &[ + &MultilinearPolynomial::new(read_row), + &MultilinearPolynomial::new(write_row), + &MultilinearPolynomial::new(read_col), + &MultilinearPolynomial::new(write_col), + ], + transcript, + )?; + + // row-related claims of polynomial evaluations to aid the final check of the sum-check + let eval_row = MultilinearPolynomial::evaluate_with(&poly.row, &r_read_write_row_col); + let eval_row_read_ts = + MultilinearPolynomial::evaluate_with(&poly.row_read_ts, &r_read_write_row_col); + let eval_E_row2 = MultilinearPolynomial::evaluate_with(&E_row, &r_read_write_row_col); + let eval_row_audit_ts = + MultilinearPolynomial::evaluate_with(&poly.row_audit_ts, &r_init_audit_row); + + // col-related claims of polynomial evaluations to aid the final check of the sum-check + let eval_col = MultilinearPolynomial::evaluate_with(&poly.col, &r_read_write_row_col); + let eval_col_read_ts = + MultilinearPolynomial::evaluate_with(&poly.col_read_ts, &r_read_write_row_col); + let eval_E_col2 = MultilinearPolynomial::evaluate_with(&E_col, &r_read_write_row_col); + let eval_col_audit_ts = + MultilinearPolynomial::evaluate_with(&poly.col_audit_ts, &r_init_audit_col); + + // we can batch prove the first three claims + transcript.absorb( + b"e", + &[ + eval_row, + eval_row_read_ts, + eval_E_row2, + eval_col, + eval_col_read_ts, + eval_E_col2, + ] + .as_slice(), + ); + let c = transcript.squeeze(b"c")?; + let eval_joint = eval_row + + c * eval_row_read_ts + + c * c * eval_E_row2 
+ + c * c * c * eval_col + + c * c * c * c * eval_col_read_ts + + c * c * c * c * c * eval_E_col2; + let comm_joint = comm.comm_row + + comm.comm_row_read_ts * c + + comm_E_row * c * c + + comm.comm_col * c * c * c + + comm.comm_col_read_ts * c * c * c * c + + comm_E_col * c * c * c * c * c; + let poly_joint = poly + .row + .iter() + .zip(poly.row_read_ts.iter()) + .zip(E_row.into_iter()) + .zip(poly.col.iter()) + .zip(poly.col_read_ts.iter()) + .zip(E_col.into_iter()) + .map(|(((((x, y), z), m), n), q)| { + *x + c * y + c * c * z + c * c * c * m + c * c * c * c * n + c * c * c * c * c * q + }) + .collect::>(); + + let arg_row_col_joint = EE::prove( + ck, + pk_ee, + transcript, + &comm_joint, + &poly_joint, + &r_read_write_row_col, + &eval_joint, + )?; + + let arg_row_audit_ts = EE::prove( + ck, + pk_ee, + transcript, + &comm.comm_row_audit_ts, + &poly.row_audit_ts, + &r_init_audit_row, + &eval_row_audit_ts, + )?; + + let arg_col_audit_ts = EE::prove( + ck, + pk_ee, + transcript, + &comm.comm_col_audit_ts, + &poly.col_audit_ts, + &r_init_audit_col, + &eval_col_audit_ts, + )?; + + Ok(Self { + // claimed evaluation + eval, + + // oracles + comm_E_row, + comm_E_col, + + // proof of correct evaluation wrt oracles + sc_proof_eval, + eval_E_row: claims_eval[0], + eval_E_col: claims_eval[1], + eval_val: claims_eval[2], + arg_eval, + + // proof that E_row and E_row are well-formed + eval_init_row: eval_init_audit_row[0], + eval_read_row: eval_read_write_row_col[0], + eval_write_row: eval_read_write_row_col[1], + eval_audit_row: eval_init_audit_row[1], + eval_init_col: eval_init_audit_col[0], + eval_read_col: eval_read_write_row_col[2], + eval_write_col: eval_read_write_row_col[3], + eval_audit_col: eval_init_audit_col[1], + sc_prod_init_audit_row, + sc_prod_read_write_row_col, + sc_prod_init_audit_col, + eval_row, + eval_row_read_ts, + eval_E_row2, + eval_row_audit_ts, + eval_col, + eval_col_read_ts, + eval_E_col2, + eval_col_audit_ts, + arg_row_col_joint, + 
arg_row_audit_ts, + arg_col_audit_ts, + }) + } + + pub fn verify( + &self, + vk_ee: &EE::VerifierKey, + comm: &SparsePolynomialCommitment, + r: &(&[G::Scalar], &[G::Scalar]), + transcript: &mut G::TE, + ) -> Result { + let (r_x, r_y) = r; + + // append the transcript and scalar + transcript.absorb(b"E", &vec![self.comm_E_row, self.comm_E_col].as_slice()); + transcript.absorb(b"e", &self.eval); + + // (1) verify the correct evaluation of sparse polynomial + let (claim_eval_final, r_eval) = self.sc_proof_eval.verify( + self.eval, + comm.size.next_power_of_two().log_2(), + 3, + transcript, + )?; + // verify the last step of the sum-check + if claim_eval_final != self.eval_E_row * self.eval_E_col * self.eval_val { + return Err(NovaError::InvalidSumcheckProof); + } + + // prove evaluations of E_row, E_col and val at r_eval + let rho = transcript.squeeze(b"r")?; + let comm_joint = self.comm_E_row + self.comm_E_col * rho + comm.comm_val * rho * rho; + let eval_joint = self.eval_E_row + rho * self.eval_E_col + rho * rho * self.eval_val; + EE::verify( + vk_ee, + transcript, + &comm_joint, + &r_eval, + &eval_joint, + &self.arg_eval, + )?; + + // (2) verify if E_row and E_col are well formed + let gamma_1 = transcript.squeeze(b"g1")?; + let gamma_2 = transcript.squeeze(b"g2")?; + + // hash function + let gamma_1_sqr = gamma_1 * gamma_1; + let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { + (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 + }; + + // check the required multiset relationship + // row + if self.eval_init_row * self.eval_write_row != self.eval_read_row * self.eval_audit_row { + return Err(NovaError::InvalidMultisetProof); + } + // col + if self.eval_init_col * self.eval_write_col != self.eval_read_col * self.eval_audit_col { + return Err(NovaError::InvalidMultisetProof); + } + + // verify the product proofs + let (claim_init_audit_row, r_init_audit_row) = self.sc_prod_init_audit_row.verify( + &[self.eval_init_row, 
self.eval_audit_row], + comm.ell.0.pow2(), + transcript, + )?; + let (claim_init_audit_col, r_init_audit_col) = self.sc_prod_init_audit_col.verify( + &[self.eval_init_col, self.eval_audit_col], + comm.ell.1.pow2(), + transcript, + )?; + let (claim_read_write_row_col, r_read_write_row_col) = self.sc_prod_read_write_row_col.verify( + &[ + self.eval_read_row, + self.eval_write_row, + self.eval_read_col, + self.eval_write_col, + ], + comm.size, + transcript, + )?; + + // finish the final step of the three sum-checks + let (claim_init_expected_row, claim_audit_expected_row) = { + let addr = IdentityPolynomial::new(r_init_audit_row.len()).evaluate(&r_init_audit_row); + let val = EqPolynomial::new(r_x.to_vec()).evaluate(&r_init_audit_row); + + ( + hash_func(&addr, &val, &G::Scalar::zero()), + hash_func(&addr, &val, &self.eval_row_audit_ts), + ) + }; + + let (claim_read_expected_row, claim_write_expected_row) = { + ( + hash_func(&self.eval_row, &self.eval_E_row2, &self.eval_row_read_ts), + hash_func( + &self.eval_row, + &self.eval_E_row2, + &(self.eval_row_read_ts + G::Scalar::one()), + ), + ) + }; + + // multiset check for the row + if claim_init_expected_row != claim_init_audit_row[0] + || claim_audit_expected_row != claim_init_audit_row[1] + || claim_read_expected_row != claim_read_write_row_col[0] + || claim_write_expected_row != claim_read_write_row_col[1] + { + return Err(NovaError::InvalidSumcheckProof); + } + + let (claim_init_expected_col, claim_audit_expected_col) = { + let addr = IdentityPolynomial::new(r_init_audit_col.len()).evaluate(&r_init_audit_col); + let val = EqPolynomial::new(r_y.to_vec()).evaluate(&r_init_audit_col); + + ( + hash_func(&addr, &val, &G::Scalar::zero()), + hash_func(&addr, &val, &self.eval_col_audit_ts), + ) + }; + + let (claim_read_expected_col, claim_write_expected_col) = { + ( + hash_func(&self.eval_col, &self.eval_E_col2, &self.eval_col_read_ts), + hash_func( + &self.eval_col, + &self.eval_E_col2, + &(self.eval_col_read_ts + 
G::Scalar::one()), + ), + ) + }; + + // multiset check for the col + if claim_init_expected_col != claim_init_audit_col[0] + || claim_audit_expected_col != claim_init_audit_col[1] + || claim_read_expected_col != claim_read_write_row_col[2] + || claim_write_expected_col != claim_read_write_row_col[3] + { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb( + b"e", + &[ + self.eval_row, + self.eval_row_read_ts, + self.eval_E_row2, + self.eval_col, + self.eval_col_read_ts, + self.eval_E_col2, + ] + .as_slice(), + ); + let c = transcript.squeeze(b"c")?; + let eval_joint = self.eval_row + + c * self.eval_row_read_ts + + c * c * self.eval_E_row2 + + c * c * c * self.eval_col + + c * c * c * c * self.eval_col_read_ts + + c * c * c * c * c * self.eval_E_col2; + let comm_joint = comm.comm_row + + comm.comm_row_read_ts * c + + self.comm_E_row * c * c + + comm.comm_col * c * c * c + + comm.comm_col_read_ts * c * c * c * c + + self.comm_E_col * c * c * c * c * c; + + EE::verify( + vk_ee, + transcript, + &comm_joint, + &r_read_write_row_col, + &eval_joint, + &self.arg_row_col_joint, + )?; + + EE::verify( + vk_ee, + transcript, + &comm.comm_row_audit_ts, + &r_init_audit_row, + &self.eval_row_audit_ts, + &self.arg_row_audit_ts, + )?; + + EE::verify( + vk_ee, + transcript, + &comm.comm_col_audit_ts, + &r_init_audit_col, + &self.eval_col_audit_ts, + &self.arg_col_audit_ts, + )?; + + Ok(self.eval) + } +} diff --git a/src/spartan/sumcheck.rs b/src/spartan/sumcheck.rs index f73ae05..ec0fd19 100644 --- a/src/spartan/sumcheck.rs +++ b/src/spartan/sumcheck.rs @@ -8,13 +8,17 @@ use ff::Field; use rayon::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] pub(crate) struct SumcheckProof { compressed_polys: Vec>, } impl SumcheckProof { + pub fn new(compressed_polys: Vec>) -> Self { + Self { compressed_polys } + } + pub fn verify( &self, claim: G::Scalar, @@ 
-302,7 +306,7 @@ pub struct UniPoly { // ax^2 + bx + c stored as vec![a,c] // ax^3 + bx^2 + cx + d stored as vec![a,c,d] -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CompressedUniPoly { coeffs_except_linear_term: Vec, _p: PhantomData, diff --git a/src/traits/snark.rs b/src/traits/snark.rs index 20289f0..e2c22d0 100644 --- a/src/traits/snark.rs +++ b/src/traits/snark.rs @@ -19,7 +19,10 @@ pub trait RelaxedR1CSSNARKTrait: type VerifierKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; /// Produces the keys for the prover and the verifier - fn setup(ck: &CommitmentKey, S: &R1CSShape) -> (Self::ProverKey, Self::VerifierKey); + fn setup( + ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; /// Produces a new SNARK for a relaxed R1CS fn prove(