From b76d7aa7ea53970d81614df364f8bd8f43d8f8e4 Mon Sep 17 00:00:00 2001 From: Srinath Setty Date: Mon, 10 Apr 2023 11:31:04 -0700 Subject: [PATCH] batch sum-checks (#161) --- Cargo.toml | 2 +- src/spartan/mod.rs | 74 +- src/spartan/pp.rs | 2279 +++++++++++++++++++++++++++++++++++++ src/spartan/pp/mod.rs | 1576 ------------------------- src/spartan/pp/product.rs | 629 ---------- src/spartan/sumcheck.rs | 88 -- 6 files changed, 2330 insertions(+), 2318 deletions(-) create mode 100644 src/spartan/pp.rs delete mode 100644 src/spartan/pp/mod.rs delete mode 100644 src/spartan/pp/product.rs diff --git a/Cargo.toml b/Cargo.toml index 394a75f..78a6d00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nova-snark" -version = "0.20.2" +version = "0.20.3" authors = ["Srinath Setty "] edition = "2021" description = "Recursive zkSNARKs without trusted setup" diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index 2e0de4e..a36ef14 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -20,6 +20,16 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sumcheck::SumcheckProof; +fn powers(s: &G::Scalar, n: usize) -> Vec { + assert!(n >= 1); + let mut powers = Vec::new(); + powers.push(G::Scalar::one()); + for i in 1..n { + powers.push(powers[i - 1] * s); + } + powers +} + /// A type that holds a witness to a polynomial evaluation instance pub struct PolyEvalWitness { p: Vec, // polynomial @@ -51,6 +61,17 @@ impl PolyEvalWitness { } PolyEvalWitness { p } } + + fn batch(p_vec: &[&Vec], s: &G::Scalar) -> PolyEvalWitness { + let powers_of_s = powers::(s, p_vec.len()); + let mut p = vec![G::Scalar::zero(); p_vec[0].len()]; + for i in 0..p_vec.len() { + for (j, item) in p.iter_mut().enumerate().take(p_vec[i].len()) { + *item += p_vec[i][j] * powers_of_s[i] + } + } + PolyEvalWitness { p } + } } /// A type that holds a polynomial evaluation instance @@ -75,6 +96,31 @@ impl PolyEvalInstance { Vec::new() } } + + fn batch( + c_vec: &[Commitment], + x: &[G::Scalar], + e_vec: &[G::Scalar], + s: &G::Scalar, + ) -> PolyEvalInstance { + let powers_of_s = powers::(s, c_vec.len()); + let e = e_vec + .iter() + .zip(powers_of_s.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let c = c_vec + .iter() + .zip(powers_of_s.iter()) + .map(|(c, p)| *c * *p) + .fold(Commitment::::default(), |acc, item| acc + item); + + PolyEvalInstance { + c, + x: x.to_vec(), + e, + } + } } /// A type that represents the prover's key @@ -312,20 +358,10 @@ impl> RelaxedR1CSSNARKTrait Vec { - assert!(n >= 1); - let mut powers = Vec::new(); - powers.push(G::Scalar::one()); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers - }; - // generate a challenge let rho = transcript.squeeze(b"r")?; let num_claims = w_vec_padded.len(); - let powers_of_rho = powers(&rho, num_claims); + let powers_of_rho = powers::(&rho, num_claims); let claim_batch_joint = u_vec_padded .iter() .zip(powers_of_rho.iter()) @@ -361,7 +397,7 @@ impl> RelaxedR1CSSNARKTrait = powers(&gamma, num_claims); + let powers_of_gamma: Vec = powers::(&gamma, num_claims); let comm_joint = u_vec_padded .iter() .zip(powers_of_gamma.iter()) @@ -517,20 +553,10 @@ impl> RelaxedR1CSSNARKTrait Vec { - assert!(n >= 1); - let mut powers = Vec::new(); - powers.push(G::Scalar::one()); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers - }; - // generate a challenge let rho = transcript.squeeze(b"r")?; let num_claims = u_vec.len(); - let powers_of_rho = powers(&rho, num_claims); + let powers_of_rho = 
powers::(&rho, num_claims); let claim_batch_joint = u_vec .iter() .zip(powers_of_rho.iter()) @@ -566,7 +592,7 @@ impl> RelaxedR1CSSNARKTrait = powers(&gamma, num_claims); + let powers_of_gamma: Vec = powers::(&gamma, num_claims); let comm_joint = u_vec_padded .iter() .zip(powers_of_gamma.iter()) diff --git a/src/spartan/pp.rs b/src/spartan/pp.rs new file mode 100644 index 0000000..a00ba4f --- /dev/null +++ b/src/spartan/pp.rs @@ -0,0 +1,2279 @@ +//! This module implements RelaxedR1CSSNARK traits using a spark-based approach to prove evaluations of +//! sparse multilinear polynomials involved in Spartan's sum-check protocol, thereby providing a preprocessing SNARK +use crate::{ + bellperson::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + spartan::{ + math::Math, + polynomial::{EqPolynomial, MultilinearPolynomial}, + powers, + sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly}, + PolyEvalInstance, PolyEvalWitness, SparsePolynomial, + }, + traits::{ + circuit::StepCircuit, + commitment::{CommitmentEngineTrait, CommitmentTrait}, + evaluation::EvaluationEngineTrait, + snark::RelaxedR1CSSNARKTrait, + Group, TranscriptEngineTrait, TranscriptReprTrait, + }, + Commitment, CommitmentKey, CompressedCommitment, +}; +use bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; +use core::{cmp::max, marker::PhantomData}; +use ff::{Field, PrimeField}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +fn vec_to_arr(v: Vec) -> [T; N] { + v.try_into() + .unwrap_or_else(|v: Vec| panic!("Expected a Vec of length {} but it was {}", N, v.len())) +} + +struct IdentityPolynomial { + ell: usize, + _p: PhantomData, +} + +impl IdentityPolynomial { + pub fn new(ell: usize) -> Self { + IdentityPolynomial { + ell, + _p: Default::default(), + } + } + + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.ell, r.len()); + (0..self.ell) + .map(|i| Scalar::from(2_usize.pow((self.ell - i - 1) as u32) as u64) * r[i]) + .fold(Scalar::zero(), |acc, item| acc + item) + } +} + +/// A type that holds R1CSShape in a form amenable to memory checking +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkRepr { + N: usize, // size of the vectors + + // dense representation + row: Vec, + col: Vec, + val_A: Vec, + val_B: Vec, + val_C: Vec, + + // timestamp polynomials + row_read_ts: Vec, + row_audit_ts: Vec, + col_read_ts: Vec, + col_audit_ts: Vec, +} + +/// A type that holds a commitment to a sparse polynomial +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkCommitment { + N: usize, // size of each vector + + // commitments to the dense representation + comm_row: Commitment, + comm_col: Commitment, + comm_val_A: Commitment, + comm_val_B: Commitment, + comm_val_C: Commitment, + + // commitments to the timestamp polynomials + comm_row_read_ts: Commitment, + comm_row_audit_ts: Commitment, + comm_col_read_ts: Commitment, + comm_col_audit_ts: Commitment, +} + +impl TranscriptReprTrait for R1CSShapeSparkCommitment { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_row, + self.comm_col, + self.comm_val_A, + self.comm_val_B, + self.comm_val_C, + self.comm_row_read_ts, + self.comm_row_audit_ts, + self.comm_col_read_ts, + self.comm_col_audit_ts, + ] + .as_slice() + .to_transcript_bytes() + } +} + +impl R1CSShapeSparkRepr { + /// represents R1CSShape 
in a Spark-friendly format amenable to memory checking + pub fn new(S: &R1CSShape) -> R1CSShapeSparkRepr { + let N = { + let total_nz = S.A.len() + S.B.len() + S.C.len(); + max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() + }; + + let row = { + let mut r = S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, _, _)| *r) + .collect::>(); + r.resize(N, 0usize); + r + }; + + let col = { + let mut c = S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(_, c, _)| *c) + .collect::>(); + c.resize(N, 0usize); + c + }; + + let val_A = { + let mut val = S.A.iter().map(|(_, _, v)| *v).collect::>(); + val.resize(N, G::Scalar::zero()); + val + }; + + let val_B = { + // prepend zeros + let mut val = vec![G::Scalar::zero(); S.A.len()]; + val.extend(S.B.iter().map(|(_, _, v)| *v).collect::>()); + // append zeros + val.resize(N, G::Scalar::zero()); + val + }; + + let val_C = { + // prepend zeros + let mut val = vec![G::Scalar::zero(); S.A.len() + S.B.len()]; + val.extend(S.C.iter().map(|(_, _, v)| *v).collect::>()); + // append zeros + val.resize(N, G::Scalar::zero()); + val + }; + + // timestamp calculation routine + let timestamp_calc = + |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> (Vec, Vec) { + let mut read_ts = vec![0usize; num_ops]; + let mut audit_ts = vec![0usize; num_cells]; + + assert!(num_ops >= addr_trace.len()); + for i in 0..addr_trace.len() { + let addr = addr_trace[i]; + assert!(addr < num_cells); + let r_ts = audit_ts[addr]; + read_ts[i] = r_ts; + + let w_ts = r_ts + 1; + audit_ts[addr] = w_ts; + } + (read_ts, audit_ts) + }; + + // timestamp polynomials for row + let (row_read_ts, row_audit_ts) = timestamp_calc(N, N, &row); + let (col_read_ts, col_audit_ts) = timestamp_calc(N, N, &col); + + // a routine to turn a vector of usize into a vector scalars + let to_vec_scalar = |v: &[usize]| -> Vec { + (0..v.len()) + .map(|i| G::Scalar::from(v[i] as u64)) + .collect::>() + }; + + R1CSShapeSparkRepr { + N, + + // dense representation + row: to_vec_scalar(&row), + col: to_vec_scalar(&col), + val_A, + val_B, + val_C, + + // timestamp polynomials + row_read_ts: to_vec_scalar(&row_read_ts), + row_audit_ts: to_vec_scalar(&row_audit_ts), + col_read_ts: to_vec_scalar(&col_read_ts), + col_audit_ts: to_vec_scalar(&col_audit_ts), + } + } + + fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { + let comm_vec: Vec> = [ + &self.row, + &self.col, + &self.val_A, + &self.val_B, + &self.val_C, + &self.row_read_ts, + &self.row_audit_ts, + &self.col_read_ts, + &self.col_audit_ts, + ] + .par_iter() + .map(|v| G::CE::commit(ck, v)) + .collect(); + + R1CSShapeSparkCommitment { + N: self.row.len(), + comm_row: comm_vec[0], + comm_col: comm_vec[1], + comm_val_A: comm_vec[2], + comm_val_B: comm_vec[3], + comm_val_C: comm_vec[4], + comm_row_read_ts: comm_vec[5], + comm_row_audit_ts: comm_vec[6], + comm_col_read_ts: comm_vec[7], + comm_col_audit_ts: comm_vec[8], + } + } + + // computes evaluation oracles + fn evaluation_oracles( + &self, + S: &R1CSShape, + r_x: &[G::Scalar], + z: &[G::Scalar], + ) -> ( + Vec, + Vec, + Vec, + Vec, + ) { + let r_x_padded = { + let mut x = vec![G::Scalar::zero(); self.N.log_2() - r_x.len()]; + x.extend(r_x); + x + }; + + let mem_row = EqPolynomial::new(r_x_padded).evals(); + let mem_col = { + let mut z = z.to_vec(); + z.resize(self.N, G::Scalar::zero()); + z + }; + + let mut E_row = S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, _, _)| mem_row[*r]) + .collect::>(); + + let mut E_col = S + 
.A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(_, c, _)| mem_col[*c]) + .collect::>(); + + E_row.resize(self.N, mem_row[0]); // we place mem_row[0] since resized row is appended with 0s + E_col.resize(self.N, mem_col[0]); + + (mem_row, mem_col, E_row, E_col) + } +} + +/// Defines a trait for implementing sum-check in a generic manner +pub trait SumcheckEngine { + /// returns the initial claims + fn initial_claims(&self) -> Vec; + + /// degree of the sum-check polynomial + fn degree(&self) -> usize; + + /// the size of the polynomials + fn size(&self) -> usize; + + /// returns evaluation points at 0, 2, d-1 (where d is the degree of the sum-check polynomial) + fn evaluation_points(&self) -> Vec>; + + /// bounds a variable in the constituent polynomials + fn bound(&mut self, r: &G::Scalar); + + /// returns the final claims + fn final_claims(&self) -> Vec>; +} + +struct ProductSumcheckInstance { + pub(crate) claims: Vec, // claimed products + pub(crate) comm_output_vec: Vec>, + + input_vec: Vec>, + output_vec: Vec>, + + poly_A: MultilinearPolynomial, + poly_B_vec: Vec>, + poly_C_vec: Vec>, + poly_D_vec: Vec>, +} + +impl ProductSumcheckInstance { + pub fn new( + ck: &CommitmentKey, + input_vec: Vec>, // list of input vectors + transcript: &mut G::TE, + ) -> Result { + let compute_layer = |input: &[G::Scalar]| -> (Vec, Vec, Vec) { + let left = (0..input.len() / 2) + .map(|i| input[2 * i]) + .collect::>(); + + let right = (0..input.len() / 2) + .map(|i| input[2 * i + 1]) + .collect::>(); + + assert_eq!(left.len(), right.len()); + + let output = (0..left.len()) + .map(|i| left[i] * right[i]) + .collect::>(); + + (left, right, output) + }; + + // a closure that returns left, right, output, product + let prepare_inputs = + |input: &[G::Scalar]| -> (Vec, Vec, Vec, G::Scalar) { + let mut left: Vec = Vec::new(); + let mut right: Vec = Vec::new(); + let mut output: Vec = Vec::new(); + + let mut out = input.to_vec(); + for _i in 0..input.len().log_2() { + let (l, r, o) = compute_layer(&out); + out = o.clone(); + + left.extend(l); + right.extend(r); + output.extend(o); + } + + // add a dummy product operation to make the left.len() == right.len() == output.len() == input.len() + left.push(output[output.len() - 1]); + right.push(G::Scalar::zero()); + output.push(G::Scalar::zero()); + + // output is stored at the last but one position + let product = output[output.len() - 2]; + + assert_eq!(left.len(), right.len()); + assert_eq!(left.len(), output.len()); + (left, right, output, product) + }; + + let mut left_vec = Vec::new(); + let mut right_vec = Vec::new(); + let mut output_vec = Vec::new(); + let mut claims = Vec::new(); + for input in input_vec.iter() { + let (l, r, o, p) = prepare_inputs(input); + left_vec.push(l); + right_vec.push(r); + output_vec.push(o); + claims.push(p); + } + + // commit to the outputs + let comm_output_vec = (0..output_vec.len()) + .into_par_iter() + .map(|i| G::CE::commit(ck, &output_vec[i])) + .collect::>(); + + // absorb the output commitment and the claimed product + transcript.absorb(b"o", &comm_output_vec.as_slice()); + transcript.absorb(b"c", &claims.as_slice()); + + // generate randomness for the eq polynomial + let num_rounds = output_vec[0].len().log_2(); + let rand_eq = (0..num_rounds) + .map(|_i| transcript.squeeze(b"e")) + .collect::, NovaError>>()?; + + let poly_A = MultilinearPolynomial::new(EqPolynomial::new(rand_eq).evals()); + let poly_B_vec = left_vec + .clone() + .into_par_iter() + .map(MultilinearPolynomial::new) + .collect::>(); + let 
poly_C_vec = right_vec + .clone() + .into_par_iter() + .map(MultilinearPolynomial::new) + .collect::>(); + let poly_D_vec = output_vec + .clone() + .into_par_iter() + .map(MultilinearPolynomial::new) + .collect::>(); + + Ok(Self { + claims, + comm_output_vec, + input_vec, + output_vec, + poly_A, + poly_B_vec, + poly_C_vec, + poly_D_vec, + }) + } +} + +impl SumcheckEngine for ProductSumcheckInstance { + fn initial_claims(&self) -> Vec { + vec![G::Scalar::zero(); 8] + } + + fn degree(&self) -> usize { + 3 + } + + fn size(&self) -> usize { + for poly_B in &self.poly_B_vec { + assert_eq!(poly_B.len(), self.poly_A.len()); + } + for poly_C in &self.poly_C_vec { + assert_eq!(poly_C.len(), self.poly_A.len()); + } + for poly_D in &self.poly_D_vec { + assert_eq!(poly_D.len(), self.poly_A.len()); + } + self.poly_A.len() + } + + fn evaluation_points(&self) -> Vec> { + let poly_A = &self.poly_A; + + let comb_func = + |poly_A_comp: &G::Scalar, + poly_B_comp: &G::Scalar, + poly_C_comp: &G::Scalar, + poly_D_comp: &G::Scalar| + -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + self + .poly_B_vec + .iter() + .zip(self.poly_C_vec.iter()) + .zip(self.poly_D_vec.iter()) + .map(|((poly_B, poly_C), poly_D)| { + let len = poly_B.len() / 2; + // Make an iterator returning the contributions to the evaluations + let (eval_point_0, eval_point_2, eval_point_3) = (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; + let eval_point_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; + let eval_point_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ); + vec![eval_point_0, eval_point_2, eval_point_3] + }) + .collect::>>() + } + + fn bound(&mut self, r: &G::Scalar) { + self.poly_A.bound_poly_var_top(r); + for ((poly_B, poly_C), poly_D) in self + .poly_B_vec + .iter_mut() + .zip(self.poly_C_vec.iter_mut()) + .zip(self.poly_D_vec.iter_mut()) + { + poly_B.bound_poly_var_top(r); + poly_C.bound_poly_var_top(r); + poly_D.bound_poly_var_top(r); + } + } + fn final_claims(&self) -> Vec> { + let poly_A_final = vec![self.poly_A[0]]; + let poly_B_final = (0..self.poly_B_vec.len()) + .map(|i| self.poly_B_vec[i][0]) + .collect(); + let poly_C_final = (0..self.poly_C_vec.len()) + .map(|i| self.poly_C_vec[i][0]) + .collect(); + let poly_D_final = (0..self.poly_D_vec.len()) + .map(|i| self.poly_D_vec[i][0]) + .collect(); + + vec![poly_A_final, poly_B_final, poly_C_final, 
poly_D_final] + } +} + +struct OuterSumcheckInstance { + poly_tau: MultilinearPolynomial, + poly_Az: MultilinearPolynomial, + poly_Bz: MultilinearPolynomial, + poly_uCz_E: MultilinearPolynomial, +} + +impl SumcheckEngine for OuterSumcheckInstance { + fn initial_claims(&self) -> Vec { + vec![G::Scalar::zero()] + } + + fn degree(&self) -> usize { + 3 + } + + fn size(&self) -> usize { + assert_eq!(self.poly_tau.len(), self.poly_Az.len()); + assert_eq!(self.poly_tau.len(), self.poly_Bz.len()); + assert_eq!(self.poly_tau.len(), self.poly_uCz_E.len()); + self.poly_tau.len() + } + + fn evaluation_points(&self) -> Vec> { + let (poly_A, poly_B, poly_C, poly_D) = ( + &self.poly_tau, + &self.poly_Az, + &self.poly_Bz, + &self.poly_uCz_E, + ); + let comb_func = + |poly_A_comp: &G::Scalar, + poly_B_comp: &G::Scalar, + poly_C_comp: &G::Scalar, + poly_D_comp: &G::Scalar| + -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + let len = poly_A.len() / 2; + + // Make an iterator returning the contributions to the evaluations + let (eval_point_0, eval_point_2, eval_point_3) = (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; + let eval_point_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; + let eval_point_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &G::Scalar) { + self.poly_tau.bound_poly_var_top(r); + self.poly_Az.bound_poly_var_top(r); + self.poly_Bz.bound_poly_var_top(r); + self.poly_uCz_E.bound_poly_var_top(r); + } + + fn final_claims(&self) -> Vec> { + vec![vec![ + self.poly_tau[0], + self.poly_Az[0], + self.poly_Bz[0], + self.poly_uCz_E[0], + ]] + } +} + +struct InnerSumcheckInstance { + claim: G::Scalar, + poly_E_row: MultilinearPolynomial, + poly_E_col: MultilinearPolynomial, + poly_val: MultilinearPolynomial, +} + +impl SumcheckEngine for InnerSumcheckInstance { + fn initial_claims(&self) -> Vec { + vec![self.claim] + } + + fn degree(&self) -> usize { + 3 + } + + fn size(&self) -> usize { + assert_eq!(self.poly_E_row.len(), self.poly_val.len()); + assert_eq!(self.poly_E_row.len(), self.poly_E_col.len()); + self.poly_E_row.len() + } + + fn evaluation_points(&self) -> Vec> { + let (poly_A, poly_B, poly_C) = (&self.poly_E_row, &self.poly_E_col, &self.poly_val); + let comb_func = |poly_A_comp: &G::Scalar, + poly_B_comp: &G::Scalar, + 
poly_C_comp: &G::Scalar| + -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + let len = poly_A.len() / 2; + + // Make an iterator returning the contributions to the evaluations + let (eval_point_0, eval_point_2, eval_point_3) = (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + let eval_point_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + let eval_point_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &G::Scalar) { + self.poly_E_row.bound_poly_var_top(r); + self.poly_E_col.bound_poly_var_top(r); + self.poly_val.bound_poly_var_top(r); + } + + fn final_claims(&self) -> Vec> { + vec![vec![ + self.poly_E_row[0], + self.poly_E_col[0], + self.poly_val[0], + ]] + } +} + +/// A type that represents the prover's key +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey> { + pk_ee: EE::ProverKey, + S: R1CSShape, + S_repr: R1CSShapeSparkRepr, + S_comm: R1CSShapeSparkCommitment, +} + +/// A type that represents the verifier's key +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + num_cons: usize, + num_vars: usize, + vk_ee: EE::VerifierKey, + S_comm: R1CSShapeSparkCommitment, +} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSSNARK> { + // commitment to oracles + comm_Az: CompressedCommitment, + comm_Bz: CompressedCommitment, + comm_Cz: CompressedCommitment, + + // commitment to oracles for the inner sum-check + comm_E_row: CompressedCommitment, + comm_E_col: CompressedCommitment, + + // initial claims + eval_Az_at_tau: G::Scalar, + eval_Bz_at_tau: G::Scalar, + eval_Cz_at_tau: G::Scalar, + + comm_output_arr: [CompressedCommitment; 8], + claims_product_arr: [G::Scalar; 8], + + // satisfiability sum-check + sc_sat: SumcheckProof, + + // claims from the end of the sum-check + eval_Az: G::Scalar, + eval_Bz: G::Scalar, + eval_Cz: G::Scalar, + eval_E: G::Scalar, + eval_E_row: G::Scalar, + eval_E_col: G::Scalar, + eval_val_A: G::Scalar, + eval_val_B: G::Scalar, + eval_val_C: G::Scalar, + eval_left_arr: [G::Scalar; 8], + eval_right_arr: [G::Scalar; 8], + eval_output_arr: [G::Scalar; 8], + eval_input_arr: [G::Scalar; 8], + eval_output2_arr: [G::Scalar; 8], + + eval_row: G::Scalar, + eval_row_read_ts: G::Scalar, + eval_E_row_at_r_prod: 
G::Scalar, + eval_row_audit_ts: G::Scalar, + eval_col: G::Scalar, + eval_col_read_ts: G::Scalar, + eval_E_col_at_r_prod: G::Scalar, + eval_col_audit_ts: G::Scalar, + eval_W: G::Scalar, + + // batch openings of all multilinear polynomials + sc_proof_batch: SumcheckProof, + evals_batch_arr: [G::Scalar; 7], + eval_arg: EE::EvaluationArgument, +} + +impl> RelaxedR1CSSNARK { + fn prove_inner( + mem: &mut T1, + outer: &mut T2, + inner: &mut T3, + transcript: &mut G::TE, + ) -> Result< + ( + SumcheckProof, + Vec, + Vec>, + Vec>, + Vec>, + ), + NovaError, + > + where + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, + { + // sanity checks + assert_eq!(mem.size(), outer.size()); + assert_eq!(mem.size(), inner.size()); + assert_eq!(mem.degree(), outer.degree()); + assert_eq!(mem.degree(), inner.degree()); + + // these claims are already added to the transcript, so we do not need to add + let claims = { + let claims_mem = mem.initial_claims(); + let claims_outer = outer.initial_claims(); + let claims_inner = inner.initial_claims(); + claims_mem + .into_iter() + .chain(claims_outer.into_iter()) + .chain(claims_inner.into_iter()) + .collect::>() + }; + + let num_claims = claims.len(); + let coeffs = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; + for i in 1..num_claims { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + // compute the joint claim + let claim = claims + .iter() + .zip(coeffs.iter()) + .map(|(c_1, c_2)| *c_1 * c_2) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let mut e = claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + let num_rounds = mem.size().log_2(); + for _i in 0..num_rounds { + let mut evals: Vec> = Vec::new(); + evals.extend(mem.evaluation_points()); + evals.extend(outer.evaluation_points()); + evals.extend(inner.evaluation_points()); + assert_eq!(evals.len(), num_claims); + + let evals_combined_0 = (0..evals.len()) + .map(|i| evals[i][0] * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let evals_combined_2 = (0..evals.len()) + .map(|i| evals[i][1] * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let evals_combined_3 = (0..evals.len()) + .map(|i| evals[i][2] * coeffs[i]) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let evals = vec![ + evals_combined_0, + e - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + mem.bound(&r_i); + outer.bound(&r_i); + inner.bound(&r_i); + + e = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + let mem_claims = mem.final_claims(); + let outer_claims = outer.final_claims(); + let inner_claims = inner.final_claims(); + + Ok(( + SumcheckProof::new(cubic_polys), + r, + mem_claims, + outer_claims, + inner_claims, + )) + } +} + +impl> RelaxedR1CSSNARKTrait + for RelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn setup( + ck: &CommitmentKey, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + let (pk_ee, vk_ee) = EE::setup(ck); + + // pad the R1CS matrices + let S = S.pad(); + + let S_repr = R1CSShapeSparkRepr::new(&S); + let S_comm = S_repr.commit(ck); + + let vk = VerifierKey { + num_cons: S.num_cons, + num_vars: S.num_vars, + S_comm: S_comm.clone(), + vk_ee, + }; + + let pk = ProverKey { + 
pk_ee, + S, + S_repr, + S_comm, + }; + + Ok((pk, vk)) + } + + /// produces a succinct proof of satisfiability of a RelaxedR1CS instance + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + let W = W.pad(&pk.S); // pad the witness + let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + + // a list of polynomial evaluation claims that will be batched + let mut w_u_vec = Vec::new(); + + // sanity check that R1CSShape has certain size characteristics + assert_eq!(pk.S.num_cons.next_power_of_two(), pk.S.num_cons); + assert_eq!(pk.S.num_vars.next_power_of_two(), pk.S.num_vars); + assert_eq!(pk.S.num_io.next_power_of_two(), pk.S.num_io); + assert!(pk.S.num_io < pk.S.num_vars); + + // append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript + transcript.absorb(b"C", &pk.S_comm); + transcript.absorb(b"U", U); + + // compute the full satisfying assignment by concatenating W.W, U.u, and U.X + let z = concat(vec![W.W.clone(), vec![U.u], U.X.clone()]); + + // compute Az, Bz, Cz + let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; + + // commit to Az, Bz, Cz + let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( + || G::CE::commit(ck, &Az), + || rayon::join(|| G::CE::commit(ck, &Bz), || G::CE::commit(ck, &Cz)), + ); + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + // number of rounds of the satisfiability sum-check + let num_rounds_sat = pk.S_repr.N.log_2(); + let tau = (0..num_rounds_sat) + .map(|_| transcript.squeeze(b"t")) + .collect::, NovaError>>()?; + + // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau + let (Az, Bz, Cz, E) = { + Az.resize(pk.S_repr.N, G::Scalar::zero()); + Bz.resize(pk.S_repr.N, G::Scalar::zero()); + Cz.resize(pk.S_repr.N, G::Scalar::zero()); + + let mut E = W.E.clone(); + E.resize(pk.S_repr.N, G::Scalar::zero()); + + (Az, Bz, Cz, E) + }; + let (eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau) = { + let evals_at_tau = [&Az, &Bz, &Cz] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with(p, &tau)) + .collect::>(); + (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) + }; + + // (2) send commitments to the following two oracles + // E_row(i) = eq(tau, row(i)) for all i + // E_col(i) = z(col(i)) for all i + let (mem_row, mem_col, E_row, E_col) = pk.S_repr.evaluation_oracles(&pk.S, &tau, &z); + let (comm_E_row, comm_E_col) = + rayon::join(|| G::CE::commit(ck, &E_row), || G::CE::commit(ck, &E_col)); + + // absorb the claimed evaluations into the transcript + transcript.absorb( + b"e", + &[eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau].as_slice(), + ); + // absorb commitments to E_row and E_col in the transcript + transcript.absorb(b"e", &vec![comm_E_row, comm_E_col].as_slice()); + + // add claims about Az, Bz, and Cz to be checked later + // since all the three polynomials are opened at tau, + // we can combine them into a single polynomial opened at tau + let eval_vec = vec![eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau]; + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + let poly_vec = vec![&Az, &Bz, &Cz]; + transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let w = PolyEvalWitness::batch(&poly_vec, &c); + let u = PolyEvalInstance::batch(&comm_vec, &tau, &eval_vec, &c); + w_u_vec.push((w, u)); + + let c_inner = c; + + // we now need to prove three claims + // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)) + // (2) 
eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = \sum_y E_row(y) * (val_A(y) + r * val_B(y) + r^2 * val_C(y)) * E_col(y) + // (3) E_row(i) = eq(tau, row(i)) and E_col(i) = z(col(i)) + + // a sum-check instance to prove the first claim + let mut outer_sc_inst = OuterSumcheckInstance { + poly_tau: MultilinearPolynomial::new(EqPolynomial::new(tau).evals()), + poly_Az: MultilinearPolynomial::new(Az.clone()), + poly_Bz: MultilinearPolynomial::new(Bz.clone()), + poly_uCz_E: { + let uCz_E = (0..Cz.len()) + .map(|i| U.u * Cz[i] + E[i]) + .collect::>(); + MultilinearPolynomial::new(uCz_E) + }, + }; + + // a sum-check instance to prove the second claim + let val = pk + .S_repr + .val_A + .iter() + .zip(pk.S_repr.val_B.iter()) + .zip(pk.S_repr.val_C.iter()) + .map(|((v_a, v_b), v_c)| *v_a + c_inner * *v_b + c_inner * c_inner * *v_c) + .collect::>(); + let mut inner_sc_inst = InnerSumcheckInstance { + claim: eval_Az_at_tau + c_inner * eval_Bz_at_tau + c_inner * c_inner * eval_Cz_at_tau, + poly_E_row: MultilinearPolynomial::new(E_row.clone()), + poly_E_col: MultilinearPolynomial::new(E_col.clone()), + poly_val: MultilinearPolynomial::new(val), + }; + + // a third sum-check instance to prove the memory-related claim + // we now need to prove that E_row and E_col are well-formed + // we use memory checking: H(INIT) * H(WS) =? H(RS) * H(FINAL) + let gamma_1 = transcript.squeeze(b"g1")?; + let gamma_2 = transcript.squeeze(b"g2")?; + + let gamma_1_sqr = gamma_1 * gamma_1; + let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { + (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 + }; + + let init_row = (0..mem_row.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &mem_row[i], &G::Scalar::zero())) + .collect::>(); + let read_row = (0..E_row.len()) + .map(|i| hash_func(&pk.S_repr.row[i], &E_row[i], &pk.S_repr.row_read_ts[i])) + .collect::>(); + let write_row = (0..E_row.len()) + .map(|i| { + hash_func( + &pk.S_repr.row[i], + &E_row[i], + &(pk.S_repr.row_read_ts[i] + G::Scalar::one()), + ) + }) + .collect::>(); + let audit_row = (0..mem_row.len()) + .map(|i| { + hash_func( + &G::Scalar::from(i as u64), + &mem_row[i], + &pk.S_repr.row_audit_ts[i], + ) + }) + .collect::>(); + + let init_col = (0..mem_col.len()) + .map(|i| hash_func(&G::Scalar::from(i as u64), &mem_col[i], &G::Scalar::zero())) + .collect::>(); + let read_col = (0..E_col.len()) + .map(|i| hash_func(&pk.S_repr.col[i], &E_col[i], &pk.S_repr.col_read_ts[i])) + .collect::>(); + let write_col = (0..E_col.len()) + .map(|i| { + hash_func( + &pk.S_repr.col[i], + &E_col[i], + &(pk.S_repr.col_read_ts[i] + G::Scalar::one()), + ) + }) + .collect::>(); + let audit_col = (0..mem_col.len()) + .map(|i| { + hash_func( + &G::Scalar::from(i as u64), + &mem_col[i], + &pk.S_repr.col_audit_ts[i], + ) + }) + .collect::>(); + + let mut mem_sc_inst = ProductSumcheckInstance::new( + ck, + vec![ + init_row, read_row, write_row, audit_row, init_col, read_col, write_col, audit_col, + ], + &mut transcript, + )?; + + let (sc_sat, r_sat, claims_mem, claims_outer, claims_inner) = Self::prove_inner( + &mut mem_sc_inst, + &mut outer_sc_inst, + &mut inner_sc_inst, + &mut transcript, + )?; + + // claims[0] is about the Eq polynomial, which the verifier computes directly + // claims[1] =? weighed sum of left(rand) + // claims[2] =? weighted sum of right(rand) + // claims[3] =? 
weighted sum of output(rand), which is easy to verify by querying output + // we also need to prove that output(output.len()-2) = claimed_product + let eval_left_vec = claims_mem[1].clone(); + let eval_right_vec = claims_mem[2].clone(); + let eval_output_vec = claims_mem[3].clone(); + + // claims from the end of sum-check + let (eval_Az, eval_Bz): (G::Scalar, G::Scalar) = (claims_outer[0][1], claims_outer[0][2]); + let eval_Cz = MultilinearPolynomial::evaluate_with(&Cz, &r_sat); + let eval_E = MultilinearPolynomial::evaluate_with(&E, &r_sat); + let eval_E_row = claims_inner[0][0]; + let eval_E_col = claims_inner[0][1]; + let eval_val_A = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_A, &r_sat); + let eval_val_B = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_B, &r_sat); + let eval_val_C = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_C, &r_sat); + let eval_vec = vec![ + eval_Az, eval_Bz, eval_Cz, eval_E, eval_E_row, eval_E_col, eval_val_A, eval_val_B, eval_val_C, + ] + .into_iter() + .chain(eval_left_vec.clone().into_iter()) + .chain(eval_right_vec.clone().into_iter()) + .chain(eval_output_vec.clone().into_iter()) + .collect::>(); + + // absorb all the claimed evaluations + transcript.absorb(b"e", &eval_vec.as_slice()); + + // we now combine eval_left = left(rand) and eval_right = right(rand) + // into claims about input and output + let c = transcript.squeeze(b"c")?; + + // eval = (G::Scalar::one() - c) * eval_left + c * eval_right + // eval is claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c) + let rand_ext = { + let mut r = r_sat.clone(); + r.extend(&[c]); + r + }; + let eval_input_vec = mem_sc_inst + .input_vec + .iter() + .map(|i| MultilinearPolynomial::evaluate_with(i, &rand_ext[1..])) + .collect::>(); + + let eval_output2_vec = mem_sc_inst + .output_vec + .iter() + .map(|o| MultilinearPolynomial::evaluate_with(o, &rand_ext[1..])) + .collect::>(); + + // add claimed evaluations to the transcript + let evals = eval_input_vec + .clone() + .into_iter() + .chain(eval_output2_vec.clone().into_iter()) + .collect::>(); + transcript.absorb(b"e", &evals.as_slice()); + + // squeeze a challenge to combine multiple claims into one + let powers_of_rho = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; + for i in 1..mem_sc_inst.initial_claims().len() { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + // take weighted sum of input, output, and their commitments + let product = mem_sc_inst + .claims + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let eval_output = eval_output_vec + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let comm_output = mem_sc_inst + .comm_output_vec + .iter() + .zip(powers_of_rho.iter()) + .map(|(c, r_i)| *c * *r_i) + .fold(Commitment::::default(), |acc, item| acc + item); + + let weighted_sum = |W: &[Vec], s: &[G::Scalar]| -> Vec { + assert_eq!(W.len(), s.len()); + let mut p = vec![G::Scalar::zero(); W[0].len()]; + for i in 0..W.len() { + for (j, item) in W[i].iter().enumerate().take(W[i].len()) { + p[j] += *item * s[i] + } + } + p + }; + + let poly_output = weighted_sum(&mem_sc_inst.output_vec, &powers_of_rho); + + let eval_output2 = eval_output2_vec + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + // eval_output = output(r_sat) + w_u_vec.push(( + PolyEvalWitness 
{ + p: poly_output.clone(), + }, + PolyEvalInstance { + c: comm_output, + x: r_sat.clone(), + e: eval_output, + }, + )); + + // claimed_product = output(1, ..., 1, 0) + let x = { + let mut x = vec![G::Scalar::one(); r_sat.len()]; + x[r_sat.len() - 1] = G::Scalar::zero(); + x + }; + w_u_vec.push(( + PolyEvalWitness { + p: poly_output.clone(), + }, + PolyEvalInstance { + c: comm_output, + x, + e: product, + }, + )); + + // eval_output2 = output(rand_ext[1..]) + w_u_vec.push(( + PolyEvalWitness { p: poly_output }, + PolyEvalInstance { + c: comm_output, + x: rand_ext[1..].to_vec(), + e: eval_output2, + }, + )); + + let r_prod = rand_ext[1..].to_vec(); + // row-related and col-related claims of polynomial evaluations to aid the final check of the sum-check + let evals = [ + &pk.S_repr.row, + &pk.S_repr.row_read_ts, + &E_row, + &pk.S_repr.row_audit_ts, + &pk.S_repr.col, + &pk.S_repr.col_read_ts, + &E_col, + &pk.S_repr.col_audit_ts, + ] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with(p, &r_prod)) + .collect::>(); + + let eval_row = evals[0]; + let eval_row_read_ts = evals[1]; + let eval_E_row_at_r_prod = evals[2]; + let eval_row_audit_ts = evals[3]; + let eval_col = evals[4]; + let eval_col_read_ts = evals[5]; + let eval_E_col_at_r_prod = evals[6]; + let eval_col_audit_ts = evals[7]; + + // we need to prove that eval_z = z(r_prod) = (1-r_prod[0]) * W.w(r_prod[1..]) + r_prod[0] * U.x(r_prod[1..]). + // r_prod was padded, so we now remove the padding + let r_prod_unpad = { + let l = pk.S_repr.N.log_2() - (2 * pk.S.num_vars).log_2(); + r_prod[l..].to_vec() + }; + + let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_prod_unpad[1..]); + + // we can batch all the claims + transcript.absorb( + b"e", + &[ + eval_row, + eval_row_read_ts, + eval_E_row_at_r_prod, + eval_row_audit_ts, + eval_col, + eval_col_read_ts, + eval_E_col_at_r_prod, + eval_col_audit_ts, + eval_W, // this will not be batched below as it is evaluated at r_prod[1..] 
+ ] + .as_slice(), + ); + + let c = transcript.squeeze(b"c")?; + let eval_vec = [ + eval_row, + eval_row_read_ts, + eval_E_row_at_r_prod, + eval_row_audit_ts, + eval_col, + eval_col_read_ts, + eval_E_col_at_r_prod, + eval_col_audit_ts, + ]; + let comm_vec = [ + pk.S_comm.comm_row, + pk.S_comm.comm_row_read_ts, + comm_E_row, + pk.S_comm.comm_row_audit_ts, + pk.S_comm.comm_col, + pk.S_comm.comm_col_read_ts, + comm_E_col, + pk.S_comm.comm_col_audit_ts, + ]; + let poly_vec = [ + &pk.S_repr.row, + &pk.S_repr.row_read_ts, + &E_row, + &pk.S_repr.row_audit_ts, + &pk.S_repr.col, + &pk.S_repr.col_read_ts, + &E_col, + &pk.S_repr.col_audit_ts, + ]; + let w = PolyEvalWitness::batch(&poly_vec, &c); + let u = PolyEvalInstance::batch(&comm_vec, &r_prod, &eval_vec, &c); + + // add the claim to prove for later + w_u_vec.push((w, u)); + + w_u_vec.push(( + PolyEvalWitness { p: W.W }, + PolyEvalInstance { + c: U.comm_W, + x: r_prod_unpad[1..].to_vec(), + e: eval_W, + }, + )); + + // all nine evaluations are at r_sat, we can fold them into one; they were added to the transcript earlier + let eval_vec = [ + eval_Az, eval_Bz, eval_Cz, eval_E, eval_E_row, eval_E_col, eval_val_A, eval_val_B, eval_val_C, + ]; + let comm_vec = [ + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_E_row, + comm_E_col, + pk.S_comm.comm_val_A, + pk.S_comm.comm_val_B, + pk.S_comm.comm_val_C, + ]; + let poly_vec = [ + &Az, + &Bz, + &Cz, + &E, + &E_row, + &E_col, + &pk.S_repr.val_A, + &pk.S_repr.val_B, + &pk.S_repr.val_C, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let w = PolyEvalWitness::batch(&poly_vec, &c); + let u = PolyEvalInstance::batch(&comm_vec, &r_sat, &eval_vec, &c); + w_u_vec.push((w, u)); + + // We will now reduce a vector of claims of evaluations at different points into claims about them at the same point. + // For example, eval_W =? W(r_y[1..]) and eval_W =? E(r_x) into + // two claims: eval_W_prime =? W(rz) and eval_E_prime =? E(rz) + // We can them combine the two into one: eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), + // where gamma is a public challenge + // Since commitments to W and E are homomorphic, the verifier can compute a commitment + // to the batched polynomial. 
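// Editor's note (illustrative sketch, not part of this patch): the comment above describes
// folding two evaluation claims at the same point rz into a single claim using a public
// challenge gamma and the homomorphism of the commitments. A minimal standalone version of
// that step is sketched below; the helper name `fold_two_claims_at_rz` is hypothetical and
// only assumes the Add/Mul operations on Commitment<G> already used elsewhere in this file.
fn fold_two_claims_at_rz<G: Group>(
  comm_w: Commitment<G>,
  eval_w: G::Scalar,
  comm_e: Commitment<G>,
  eval_e: G::Scalar,
  gamma: G::Scalar,
) -> (Commitment<G>, G::Scalar) {
  // commitment to the batched polynomial W + gamma * E, computable homomorphically
  // by both the prover and the verifier from the individual commitments
  let comm_joint = comm_w + comm_e * gamma;
  // claimed evaluation (W + gamma * E)(rz) = eval_W_prime + gamma * eval_E_prime
  let eval_joint = eval_w + gamma * eval_e;
  (comm_joint, eval_joint)
}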
+ assert!(w_u_vec.len() >= 2); + + let (w_vec, u_vec): (Vec>, Vec>) = + w_u_vec.into_iter().unzip(); + let w_vec_padded = PolyEvalWitness::pad(&w_vec); // pad the polynomials to be of the same size + let u_vec_padded = PolyEvalInstance::pad(&u_vec); // pad the evaluation points + + // generate a challenge + let rho = transcript.squeeze(b"r")?; + let num_claims = w_vec_padded.len(); + let powers_of_rho = powers::(&rho, num_claims); + let claim_batch_joint = u_vec_padded + .iter() + .zip(powers_of_rho.iter()) + .map(|(u, p)| u.e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let mut polys_left: Vec> = w_vec_padded + .iter() + .map(|w| MultilinearPolynomial::new(w.p.clone())) + .collect(); + let mut polys_right: Vec> = u_vec_padded + .iter() + .map(|u| MultilinearPolynomial::new(EqPolynomial::new(u.x.clone()).evals())) + .collect(); + + let num_rounds_z = u_vec_padded[0].x.len(); + let comb_func = |poly_A_comp: &G::Scalar, poly_B_comp: &G::Scalar| -> G::Scalar { + *poly_A_comp * *poly_B_comp + }; + let (sc_proof_batch, r_z, claims_batch) = SumcheckProof::prove_quad_batch( + &claim_batch_joint, + num_rounds_z, + &mut polys_left, + &mut polys_right, + &powers_of_rho, + comb_func, + &mut transcript, + )?; + + let (claims_batch_left, _): (Vec, Vec) = claims_batch; + + transcript.absorb(b"l", &claims_batch_left.as_slice()); + + // we now combine evaluation claims at the same point rz into one + let gamma = transcript.squeeze(b"g")?; + let powers_of_gamma: Vec = powers::(&gamma, num_claims); + let comm_joint = u_vec_padded + .iter() + .zip(powers_of_gamma.iter()) + .map(|(u, g_i)| u.c * *g_i) + .fold(Commitment::::default(), |acc, item| acc + item); + let poly_joint = PolyEvalWitness::weighted_sum(&w_vec_padded, &powers_of_gamma); + let eval_joint = claims_batch_left + .iter() + .zip(powers_of_gamma.iter()) + .map(|(e, g_i)| *e * *g_i) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let eval_arg = EE::prove( + ck, + &pk.pk_ee, + &mut transcript, + &comm_joint, + &poly_joint.p, + &r_z, + &eval_joint, + )?; + + Ok(RelaxedR1CSSNARK { + comm_Az: comm_Az.compress(), + comm_Bz: comm_Bz.compress(), + comm_Cz: comm_Cz.compress(), + comm_E_row: comm_E_row.compress(), + comm_E_col: comm_E_col.compress(), + eval_Az_at_tau, + eval_Bz_at_tau, + eval_Cz_at_tau, + comm_output_arr: vec_to_arr( + mem_sc_inst + .comm_output_vec + .iter() + .map(|c| c.compress()) + .collect::>>(), + ), + claims_product_arr: vec_to_arr(mem_sc_inst.claims.clone()), + + sc_sat, + + eval_Az, + eval_Bz, + eval_Cz, + eval_E, + eval_E_row, + eval_E_col, + eval_val_A, + eval_val_B, + eval_val_C, + + eval_left_arr: vec_to_arr(eval_left_vec), + eval_right_arr: vec_to_arr(eval_right_vec), + eval_output_arr: vec_to_arr(eval_output_vec), + eval_input_arr: vec_to_arr(eval_input_vec), + eval_output2_arr: vec_to_arr(eval_output2_vec), + + eval_row, + eval_row_read_ts, + eval_E_row_at_r_prod, + eval_row_audit_ts, + eval_col, + eval_col_read_ts, + eval_E_col_at_r_prod, + eval_col_audit_ts, + eval_W, + + sc_proof_batch, + evals_batch_arr: vec_to_arr(claims_batch_left), + eval_arg, + }) + } + + /// verifies a proof of satisfiability of a RelaxedR1CS instance + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); + let mut u_vec: Vec> = Vec::new(); + + // append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript + transcript.absorb(b"C", &vk.S_comm); + transcript.absorb(b"U", U); + + let comm_Az = 
Commitment::::decompress(&self.comm_Az)?; + let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; + let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; + let comm_E_row = Commitment::::decompress(&self.comm_E_row)?; + let comm_E_col = Commitment::::decompress(&self.comm_E_col)?; + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + let num_rounds_sat = vk.S_comm.N.log_2(); + let tau = (0..num_rounds_sat) + .map(|_i| transcript.squeeze(b"t")) + .collect::, NovaError>>()?; + + transcript.absorb( + b"e", + &[ + self.eval_Az_at_tau, + self.eval_Bz_at_tau, + self.eval_Cz_at_tau, + ] + .as_slice(), + ); + + transcript.absorb(b"e", &vec![comm_E_row, comm_E_col].as_slice()); + + // add claims about Az, Bz, and Cz to be checked later + // since all the three polynomials are opened at tau, + // we can combine them into a single polynomial opened at tau + let eval_vec = vec![ + self.eval_Az_at_tau, + self.eval_Bz_at_tau, + self.eval_Cz_at_tau, + ]; + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let u = PolyEvalInstance::batch(&comm_vec, &tau, &eval_vec, &c); + let claim_inner = u.e; + let c_inner = c; + u_vec.push(u); + + let gamma_1 = transcript.squeeze(b"g1")?; + let gamma_2 = transcript.squeeze(b"g2")?; + + // hash function + let gamma_1_sqr = gamma_1 * gamma_1; + let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { + (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 + }; + + // check the required multiset relationship + // row + if self.claims_product_arr[0] * self.claims_product_arr[2] + != self.claims_product_arr[1] * self.claims_product_arr[3] + { + return Err(NovaError::InvalidMultisetProof); + } + // col + if self.claims_product_arr[4] * self.claims_product_arr[6] + != self.claims_product_arr[5] * self.claims_product_arr[7] + { + return Err(NovaError::InvalidMultisetProof); + } + + let comm_output_vec = self + .comm_output_arr + .iter() + .map(|c| Commitment::::decompress(c)) + .collect::>, NovaError>>()?; + + transcript.absorb(b"o", &comm_output_vec.as_slice()); + transcript.absorb(b"c", &self.claims_product_arr.as_slice()); + + let num_rounds = vk.S_comm.N.log_2(); + let rand_eq = (0..num_rounds) + .map(|_i| transcript.squeeze(b"e")) + .collect::, NovaError>>()?; + + let num_claims = 10; + let coeffs = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; + for i in 1..num_claims { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + let claim = coeffs[9] * claim_inner; // rest are zeros + let (claim_sat_final, r_sat) = self + .sc_sat + .verify(claim, num_rounds_sat, 3, &mut transcript)?; + + // verify claim_sat_final + let taus_bound_r_sat = EqPolynomial::new(tau.clone()).evaluate(&r_sat); + let rand_eq_bound_r_sat = EqPolynomial::new(rand_eq).evaluate(&r_sat); + let claim_mem_final_expected = (0..8) + .map(|i| { + coeffs[i] + * rand_eq_bound_r_sat + * (self.eval_left_arr[i] * self.eval_right_arr[i] - self.eval_output_arr[i]) + }) + .fold(G::Scalar::zero(), |acc, item| acc + item); + let claim_outer_final_expected = coeffs[8] + * taus_bound_r_sat + * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E); + let claim_inner_final_expected = coeffs[9] + * self.eval_E_row + * self.eval_E_col + * (self.eval_val_A + c_inner * self.eval_val_B + c_inner * c_inner * self.eval_val_C); + + if claim_mem_final_expected + claim_outer_final_expected + claim_inner_final_expected + != 
claim_sat_final + { + return Err(NovaError::InvalidSumcheckProof); + } + + // claims from the end of the sum-check + let eval_vec = [ + self.eval_Az, + self.eval_Bz, + self.eval_Cz, + self.eval_E, + self.eval_E_row, + self.eval_E_col, + self.eval_val_A, + self.eval_val_B, + self.eval_val_C, + ] + .into_iter() + .chain(self.eval_left_arr.into_iter()) + .chain(self.eval_right_arr.into_iter()) + .chain(self.eval_output_arr.into_iter()) + .collect::>(); + + transcript.absorb(b"e", &eval_vec.as_slice()); + // we now combine eval_left = left(rand) and eval_right = right(rand) + // into claims about input and output + let c = transcript.squeeze(b"c")?; + + // eval = (G::Scalar::one() - c) * eval_left + c * eval_right + // eval is claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c) + let rand_ext = { + let mut r = r_sat.clone(); + r.extend(&[c]); + r + }; + + // add claimed evaluations to the transcript + let evals = self + .eval_input_arr + .into_iter() + .chain(self.eval_output2_arr.into_iter()) + .collect::>(); + transcript.absorb(b"e", &evals.as_slice()); + + // squeeze a challenge to combine multiple claims into one + let powers_of_rho = { + let s = transcript.squeeze(b"r")?; + let mut s_vec = vec![s]; + for i in 1..num_claims { + s_vec.push(s_vec[i - 1] * s); + } + s_vec + }; + + // take weighted sum of input, output, and their commitments + let product = self + .claims_product_arr + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let eval_output = self + .eval_output_arr + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let comm_output = comm_output_vec + .iter() + .zip(powers_of_rho.iter()) + .map(|(c, r_i)| *c * *r_i) + .fold(Commitment::::default(), |acc, item| acc + item); + + let eval_output2 = self + .eval_output2_arr + .iter() + .zip(powers_of_rho.iter()) + .map(|(e, p)| *e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + // eval_output = output(r_sat) + u_vec.push(PolyEvalInstance { + c: comm_output, + x: r_sat.clone(), + e: eval_output, + }); + + // claimed_product = output(1, ..., 1, 0) + let x = { + let mut x = vec![G::Scalar::one(); r_sat.len()]; + x[r_sat.len() - 1] = G::Scalar::zero(); + x + }; + u_vec.push(PolyEvalInstance { + c: comm_output, + x, + e: product, + }); + + // eval_output2 = output(rand_ext[1..]) + u_vec.push(PolyEvalInstance { + c: comm_output, + x: rand_ext[1..].to_vec(), + e: eval_output2, + }); + + let r_prod = rand_ext[1..].to_vec(); + // row-related and col-related claims of polynomial evaluations to aid the final check of the sum-check + // we can batch all the claims + transcript.absorb( + b"e", + &[ + self.eval_row, + self.eval_row_read_ts, + self.eval_E_row_at_r_prod, + self.eval_row_audit_ts, + self.eval_col, + self.eval_col_read_ts, + self.eval_E_col_at_r_prod, + self.eval_col_audit_ts, + self.eval_W, + ] + .as_slice(), + ); + let c = transcript.squeeze(b"c")?; + let eval_vec = [ + self.eval_row, + self.eval_row_read_ts, + self.eval_E_row_at_r_prod, + self.eval_row_audit_ts, + self.eval_col, + self.eval_col_read_ts, + self.eval_E_col_at_r_prod, + self.eval_col_audit_ts, + ]; + let comm_vec = [ + vk.S_comm.comm_row, + vk.S_comm.comm_row_read_ts, + comm_E_row, + vk.S_comm.comm_row_audit_ts, + vk.S_comm.comm_col, + vk.S_comm.comm_col_read_ts, + comm_E_col, + vk.S_comm.comm_col_audit_ts, + ]; + let u = PolyEvalInstance::batch(&comm_vec, &r_prod, 
&eval_vec, &c); + + // add the claim to prove for later + u_vec.push(u); + + // compute eval_Z + let (eval_Z, r_prod_unpad) = { + // r_prod was padded, so we now remove the padding + let (factor, r_prod_unpad) = { + let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); + + let mut factor = G::Scalar::one(); + for r_p in r_prod.iter().take(l) { + factor *= G::Scalar::one() - r_p + } + + let r_prod_unpad = { + let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); + r_prod[l..].to_vec() + }; + + (factor, r_prod_unpad) + }; + + let eval_X = { + // constant term + let mut poly_X = vec![(0, U.u)]; + //remaining inputs + poly_X.extend( + (0..U.X.len()) + .map(|i| (i + 1, U.X[i])) + .collect::>(), + ); + SparsePolynomial::new((vk.num_vars as f64).log2() as usize, poly_X) + .evaluate(&r_prod_unpad[1..]) + }; + let eval_Z = + factor * ((G::Scalar::one() - r_prod_unpad[0]) * self.eval_W + r_prod_unpad[0] * eval_X); + + (eval_Z, r_prod_unpad) + }; + + u_vec.push(PolyEvalInstance { + c: U.comm_W, + x: r_prod_unpad[1..].to_vec(), + e: self.eval_W, + }); + + // finish the final step of the sum-check + let (claim_init_expected_row, claim_audit_expected_row) = { + let addr = IdentityPolynomial::new(r_prod.len()).evaluate(&r_prod); + let val = EqPolynomial::new(tau.to_vec()).evaluate(&r_prod); + ( + hash_func(&addr, &val, &G::Scalar::zero()), + hash_func(&addr, &val, &self.eval_row_audit_ts), + ) + }; + + let (claim_read_expected_row, claim_write_expected_row) = { + ( + hash_func( + &self.eval_row, + &self.eval_E_row_at_r_prod, + &self.eval_row_read_ts, + ), + hash_func( + &self.eval_row, + &self.eval_E_row_at_r_prod, + &(self.eval_row_read_ts + G::Scalar::one()), + ), + ) + }; + + // multiset check for the row + if claim_init_expected_row != self.eval_input_arr[0] + || claim_read_expected_row != self.eval_input_arr[1] + || claim_write_expected_row != self.eval_input_arr[2] + || claim_audit_expected_row != self.eval_input_arr[3] + { + return Err(NovaError::InvalidSumcheckProof); + } + + let (claim_init_expected_col, claim_audit_expected_col) = { + let addr = IdentityPolynomial::new(r_prod.len()).evaluate(&r_prod); + let val = eval_Z; + ( + hash_func(&addr, &val, &G::Scalar::zero()), + hash_func(&addr, &val, &self.eval_col_audit_ts), + ) + }; + + let (claim_read_expected_col, claim_write_expected_col) = { + ( + hash_func( + &self.eval_col, + &self.eval_E_col_at_r_prod, + &self.eval_col_read_ts, + ), + hash_func( + &self.eval_col, + &self.eval_E_col_at_r_prod, + &(self.eval_col_read_ts + G::Scalar::one()), + ), + ) + }; + + // multiset check for the col + if claim_init_expected_col != self.eval_input_arr[4] + || claim_read_expected_col != self.eval_input_arr[5] + || claim_write_expected_col != self.eval_input_arr[6] + || claim_audit_expected_col != self.eval_input_arr[7] + { + return Err(NovaError::InvalidSumcheckProof); + } + + // since all the nine polynomials are opened at r_sat, + // we can combine them into a single polynomial opened at r_sat + let eval_vec = [ + self.eval_Az, + self.eval_Bz, + self.eval_Cz, + self.eval_E, + self.eval_E_row, + self.eval_E_col, + self.eval_val_A, + self.eval_val_B, + self.eval_val_C, + ]; + let comm_vec = [ + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_E_row, + comm_E_col, + vk.S_comm.comm_val_A, + vk.S_comm.comm_val_B, + vk.S_comm.comm_val_C, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // c_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let u = PolyEvalInstance::batch(&comm_vec, &r_sat, &eval_vec, &c); + u_vec.push(u); + + 
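// Editor's note (illustrative sketch, not part of this patch): the batch sum-check verified
// below starts from a joint claim that is a random linear combination of the individual
// evaluation claims, weighted by powers of the challenge rho. A minimal standalone version
// of that weighting is sketched here; `joint_claim_sketch` is a hypothetical helper that
// reuses the crate's `powers` function from spartan/mod.rs.
fn joint_claim_sketch<G: Group>(claims: &[G::Scalar], rho: &G::Scalar) -> G::Scalar {
  // powers(rho, n) = [1, rho, rho^2, ..., rho^{n-1}]
  powers::<G>(rho, claims.len())
    .iter()
    .zip(claims.iter())
    .map(|(p, e)| *p * e)
    .fold(G::Scalar::zero(), |acc, item| acc + item)
}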
let u_vec_padded = PolyEvalInstance::pad(&u_vec); // pad the evaluation points + + // generate a challenge + let rho = transcript.squeeze(b"r")?; + let num_claims = u_vec.len(); + let powers_of_rho = powers::(&rho, num_claims); + let claim_batch_joint = u_vec_padded + .iter() + .zip(powers_of_rho.iter()) + .map(|(u, p)| u.e * p) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + let num_rounds_z = u_vec_padded[0].x.len(); + let (claim_batch_final, r_z) = + self + .sc_proof_batch + .verify(claim_batch_joint, num_rounds_z, 2, &mut transcript)?; + + let claim_batch_final_expected = { + let poly_rz = EqPolynomial::new(r_z.clone()); + let evals = u_vec_padded + .iter() + .map(|u| poly_rz.evaluate(&u.x)) + .collect::>(); + + evals + .iter() + .zip(self.evals_batch_arr.iter()) + .zip(powers_of_rho.iter()) + .map(|((e_i, p_i), rho_i)| *e_i * *p_i * rho_i) + .fold(G::Scalar::zero(), |acc, item| acc + item) + }; + + if claim_batch_final != claim_batch_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb(b"l", &self.evals_batch_arr.as_slice()); + + // we now combine evaluation claims at the same point rz into one + let gamma = transcript.squeeze(b"g")?; + let powers_of_gamma: Vec = powers::(&gamma, num_claims); + let comm_joint = u_vec_padded + .iter() + .zip(powers_of_gamma.iter()) + .map(|(u, g_i)| u.c * *g_i) + .fold(Commitment::::default(), |acc, item| acc + item); + let eval_joint = self + .evals_batch_arr + .iter() + .zip(powers_of_gamma.iter()) + .map(|(e, g_i)| *e * *g_i) + .fold(G::Scalar::zero(), |acc, item| acc + item); + + // verify + EE::verify( + &vk.vk_ee, + &mut transcript, + &comm_joint, + &r_z, + &eval_joint, + &self.eval_arg, + )?; + + Ok(()) + } +} + +// provides direct interfaces to call the SNARK implemented in this module +struct SpartanCircuit> { + z_i: Option>, // inputs to the circuit + sc: SC, // step circuit to be executed +} + +impl> Circuit for SpartanCircuit { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + // obtain the arity information + let arity = self.sc.arity(); + + // Allocate zi. 
If inputs.zi is not provided, allocate default value 0 + let zero = vec![G::Scalar::zero(); arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.z_i.as_ref().unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + let z_i_plus_one = self.sc.synthesize(&mut cs.namespace(|| "F"), &z_i)?; + + // inputize both z_i and z_i_plus_one + for (j, input) in z_i.iter().enumerate().take(arity) { + let _ = input.inputize(cs.namespace(|| format!("input {j}"))); + } + for (j, output) in z_i_plus_one.iter().enumerate().take(arity) { + let _ = output.inputize(cs.namespace(|| format!("output {j}"))); + } + + Ok(()) + } +} + +/// A type that holds Spartan's prover key +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SpartanProverKey +where + G: Group, + EE: EvaluationEngineTrait, +{ + S: R1CSShape, + ck: CommitmentKey, + pk: ProverKey, +} + +/// A type that holds Spartan's verifier key +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SpartanVerifierKey +where + G: Group, + EE: EvaluationEngineTrait, +{ + vk: VerifierKey, +} + +/// A direct SNARK proving a step circuit +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SpartanSNARK +where + G: Group, + EE: EvaluationEngineTrait, + C: StepCircuit, +{ + comm_W: Commitment, // commitment to the witness + snark: RelaxedR1CSSNARK, // snark proving the witness is satisfying + _p: PhantomData, +} + +impl, C: StepCircuit> + SpartanSNARK +{ + /// Produces prover and verifier keys for Spartan + pub fn setup(sc: C) -> Result<(SpartanProverKey, SpartanVerifierKey), NovaError> { + // construct a circuit that can be synthesized + let circuit: SpartanCircuit = SpartanCircuit { z_i: None, sc }; + + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit.synthesize(&mut cs); + let (S, ck) = cs.r1cs_shape(); + + let (pk, vk) = RelaxedR1CSSNARK::setup(&ck, &S)?; + + let pk = SpartanProverKey { S, ck, pk }; + + let vk = SpartanVerifierKey { vk }; + + Ok((pk, vk)) + } + + /// Produces a proof of satisfiability of the provided circuit + pub fn prove(pk: &SpartanProverKey, sc: C, z_i: &[G::Scalar]) -> Result { + let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); + + let circuit: SpartanCircuit = SpartanCircuit { + z_i: Some(z_i.to_vec()), + sc, + }; + + let _ = circuit.synthesize(&mut cs); + let (u, w) = cs + .r1cs_instance_and_witness(&pk.S, &pk.ck) + .map_err(|_e| NovaError::UnSat)?; + + // convert the instance and witness to relaxed form + let (u_relaxed, w_relaxed) = ( + RelaxedR1CSInstance::from_r1cs_instance_unchecked(&u.comm_W, &u.X), + RelaxedR1CSWitness::from_r1cs_witness(&pk.S, &w), + ); + + // prove the instance using Spartan + let snark = RelaxedR1CSSNARK::prove(&pk.ck, &pk.pk, &u_relaxed, &w_relaxed)?; + + Ok(SpartanSNARK { + comm_W: u.comm_W, + snark, + _p: Default::default(), + }) + } + + /// Verifies a proof of satisfiability + pub fn verify(&self, vk: &SpartanVerifierKey, io: &[G::Scalar]) -> Result<(), NovaError> { + // construct an instance using the provided commitment to the witness and z_i and z_{i+1} + let u_relaxed = RelaxedR1CSInstance::from_r1cs_instance_unchecked(&self.comm_W, io); + + // verify the snark using the constructed instance + self.snark.verify(&vk.vk, &u_relaxed)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + type G = pasta_curves::pallas::Point; + type EE = crate::provider::ipa_pc::EvaluationEngine; + use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, 
SynthesisError}; + use core::marker::PhantomData; + use ff::PrimeField; + + #[derive(Clone, Debug, Default)] + struct CubicCircuit { + _p: PhantomData, + } + + impl StepCircuit for CubicCircuit + where + F: PrimeField, + { + fn arity(&self) -> usize { + 1 + } + + fn synthesize>( + &self, + cs: &mut CS, + z: &[AllocatedNum], + ) -> Result>, SynthesisError> { + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are respectively the input and output. + let x = &z[0]; + let x_sq = x.square(cs.namespace(|| "x_sq"))?; + let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) + })?; + + cs.enforce( + || "y = x^3 + x + 5", + |lc| { + lc + x_cu.get_variable() + + x.get_variable() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + }, + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + + Ok(vec![y]) + } + + fn output(&self, z: &[F]) -> Vec { + vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] + } + } + + #[test] + fn test_spartan_snark() { + let circuit = CubicCircuit::default(); + + // produce keys + let (pk, vk) = + SpartanSNARK::::Scalar>>::setup(circuit.clone()).unwrap(); + + let num_steps = 3; + + // setup inputs + let z0 = vec![::Scalar::zero()]; + let mut z_i = z0; + + for _i in 0..num_steps { + // produce a SNARK + let res = SpartanSNARK::prove(&pk, circuit.clone(), &z_i); + assert!(res.is_ok()); + + let z_i_plus_one = circuit.output(&z_i); + + let snark = res.unwrap(); + + // verify the SNARK + let io = z_i + .clone() + .into_iter() + .chain(z_i_plus_one.clone().into_iter()) + .collect::>(); + let res = snark.verify(&vk, &io); + assert!(res.is_ok()); + + // set input to the next step + z_i = z_i_plus_one.clone(); + } + + // sanity: check the claimed output with a direct computation of the same + assert_eq!(z_i, vec![::Scalar::from(2460515u64)]); + } +} diff --git a/src/spartan/pp/mod.rs b/src/spartan/pp/mod.rs deleted file mode 100644 index 5b926ba..0000000 --- a/src/spartan/pp/mod.rs +++ /dev/null @@ -1,1576 +0,0 @@ -//! This module implements RelaxedR1CSSNARK traits using a spark-based approach to prove evaluations of -//! 
sparse multilinear polynomials involved in Spartan's sum-check protocol, thereby providing a preprocessing SNARK -use crate::{ - bellperson::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - spartan::{ - math::Math, - polynomial::{EqPolynomial, MultilinearPolynomial, SparsePolynomial}, - sumcheck::SumcheckProof, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - circuit::StepCircuit, commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, - snark::RelaxedR1CSSNARKTrait, Group, TranscriptEngineTrait, TranscriptReprTrait, - }, - Commitment, CommitmentKey, -}; -use bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; -use core::{cmp::max, marker::PhantomData}; -use ff::Field; -use itertools::concat; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -mod product; - -use product::{IdentityPolynomial, ProductArgument}; - -/// A type that holds R1CSShape in a form amenable to memory checking -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkRepr { - N: usize, // size of the vectors - - // dense representation - row: Vec, - col: Vec, - val_A: Vec, - val_B: Vec, - val_C: Vec, - - // timestamp polynomials - row_read_ts: Vec, - row_audit_ts: Vec, - col_read_ts: Vec, - col_audit_ts: Vec, -} - -/// A type that holds a commitment to a sparse polynomial -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkCommitment { - N: usize, // size of each vector - - // commitments to the dense representation - comm_row: Commitment, - comm_col: Commitment, - comm_val_A: Commitment, - comm_val_B: Commitment, - comm_val_C: Commitment, - - // commitments to the timestamp polynomials - comm_row_read_ts: Commitment, - comm_row_audit_ts: Commitment, - comm_col_read_ts: Commitment, - comm_col_audit_ts: Commitment, -} - -impl TranscriptReprTrait for R1CSShapeSparkCommitment { - fn to_transcript_bytes(&self) -> Vec { - [ - self.comm_row, - self.comm_col, - self.comm_val_A, - self.comm_val_B, - self.comm_val_C, - self.comm_row_read_ts, - self.comm_row_audit_ts, - self.comm_col_read_ts, - self.comm_col_audit_ts, - ] - .as_slice() - .to_transcript_bytes() - } -} - -impl R1CSShapeSparkRepr { - /// represents R1CSShape in a Spark-friendly format amenable to memory checking - pub fn new(S: &R1CSShape) -> R1CSShapeSparkRepr { - let N = { - let total_nz = S.A.len() + S.B.len() + S.C.len(); - max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() - }; - - let row = { - let mut r = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, _, _)| *r) - .collect::>(); - r.resize(N, 0usize); - r - }; - - let col = { - let mut c = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(_, c, _)| *c) - .collect::>(); - c.resize(N, 0usize); - c - }; - - let val_A = { - let mut val = S.A.iter().map(|(_, _, v)| *v).collect::>(); - val.resize(N, G::Scalar::zero()); - val - }; - - let val_B = { - // prepend zeros - let mut val = vec![G::Scalar::zero(); S.A.len()]; - val.extend(S.B.iter().map(|(_, _, v)| *v).collect::>()); - // append zeros - val.resize(N, G::Scalar::zero()); - val - }; - - let val_C = { - // prepend zeros - let mut val = vec![G::Scalar::zero(); S.A.len() + S.B.len()]; - val.extend(S.C.iter().map(|(_, _, v)| *v).collect::>()); - // append zeros - val.resize(N, G::Scalar::zero()); - val - }; - - // timestamp calculation 
routine - let timestamp_calc = - |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> (Vec, Vec) { - let mut read_ts = vec![0usize; num_ops]; - let mut audit_ts = vec![0usize; num_cells]; - - assert!(num_ops >= addr_trace.len()); - for i in 0..addr_trace.len() { - let addr = addr_trace[i]; - assert!(addr < num_cells); - let r_ts = audit_ts[addr]; - read_ts[i] = r_ts; - - let w_ts = r_ts + 1; - audit_ts[addr] = w_ts; - } - (read_ts, audit_ts) - }; - - // timestamp polynomials for row - let (row_read_ts, row_audit_ts) = timestamp_calc(N, N, &row); - let (col_read_ts, col_audit_ts) = timestamp_calc(N, N, &col); - - // a routine to turn a vector of usize into a vector scalars - let to_vec_scalar = |v: &[usize]| -> Vec { - (0..v.len()) - .map(|i| G::Scalar::from(v[i] as u64)) - .collect::>() - }; - - R1CSShapeSparkRepr { - N, - - // dense representation - row: to_vec_scalar(&row), - col: to_vec_scalar(&col), - val_A, - val_B, - val_C, - - // timestamp polynomials - row_read_ts: to_vec_scalar(&row_read_ts), - row_audit_ts: to_vec_scalar(&row_audit_ts), - col_read_ts: to_vec_scalar(&col_read_ts), - col_audit_ts: to_vec_scalar(&col_audit_ts), - } - } - - fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { - let comm_vec: Vec> = [ - &self.row, - &self.col, - &self.val_A, - &self.val_B, - &self.val_C, - &self.row_read_ts, - &self.row_audit_ts, - &self.col_read_ts, - &self.col_audit_ts, - ] - .par_iter() - .map(|v| G::CE::commit(ck, v)) - .collect(); - - R1CSShapeSparkCommitment { - N: self.row.len(), - comm_row: comm_vec[0], - comm_col: comm_vec[1], - comm_val_A: comm_vec[2], - comm_val_B: comm_vec[3], - comm_val_C: comm_vec[4], - comm_row_read_ts: comm_vec[5], - comm_row_audit_ts: comm_vec[6], - comm_col_read_ts: comm_vec[7], - comm_col_audit_ts: comm_vec[8], - } - } - - /// evaluates the the provided R1CSShape at (r_x, r_y) - pub fn multi_evaluate( - M_vec: &[&[(usize, usize, G::Scalar)]], - r_x: &[G::Scalar], - r_y: &[G::Scalar], - ) -> Vec { - let evaluate_with_table = - |M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar { - (0..M.len()) - .collect::>() - .par_iter() - .map(|&i| { - let (row, col, val) = M[i]; - T_x[row] * T_y[col] * val - }) - .reduce(G::Scalar::zero, |acc, x| acc + x) - }; - - let (T_x, T_y) = rayon::join( - || EqPolynomial::new(r_x.to_vec()).evals(), - || EqPolynomial::new(r_y.to_vec()).evals(), - ); - - (0..M_vec.len()) - .collect::>() - .par_iter() - .map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y)) - .collect() - } - - // computes evaluation oracles - fn evaluation_oracles( - &self, - S: &R1CSShape, - r_x: &[G::Scalar], - z: &[G::Scalar], - ) -> ( - Vec, - Vec, - Vec, - Vec, - ) { - let r_x_padded = { - let mut x = vec![G::Scalar::zero(); self.N.log_2() - r_x.len()]; - x.extend(r_x); - x - }; - - let mem_row = EqPolynomial::new(r_x_padded).evals(); - let mem_col = { - let mut z = z.to_vec(); - z.resize(self.N, G::Scalar::zero()); - z - }; - - let mut E_row = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, _, _)| mem_row[*r]) - .collect::>(); - - let mut E_col = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(_, c, _)| mem_col[*c]) - .collect::>(); - - E_row.resize(self.N, mem_row[0]); // we place mem_row[0] since resized row is appended with 0s - E_col.resize(self.N, mem_col[0]); - - (mem_row, mem_col, E_row, E_col) - } -} - -/// A type that represents the memory-checking argument -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct MemcheckProof { - 
sc_prod: ProductArgument, - - eval_init_row: G::Scalar, - eval_read_row: G::Scalar, - eval_write_row: G::Scalar, - eval_audit_row: G::Scalar, - eval_init_col: G::Scalar, - eval_read_col: G::Scalar, - eval_write_col: G::Scalar, - eval_audit_col: G::Scalar, - - eval_row: G::Scalar, - eval_row_read_ts: G::Scalar, - eval_E_row: G::Scalar, - eval_row_audit_ts: G::Scalar, - eval_col: G::Scalar, - eval_col_read_ts: G::Scalar, - eval_E_col: G::Scalar, - eval_col_audit_ts: G::Scalar, - eval_z: G::Scalar, -} - -impl MemcheckProof { - #[allow(clippy::too_many_arguments)] - /// proves a memory-checking relation - fn prove( - ck: &CommitmentKey, - S_repr: &R1CSShapeSparkRepr, - S_comm: &R1CSShapeSparkCommitment, - mem_row: &[G::Scalar], - comm_E_row: &Commitment, - E_row: &[G::Scalar], - mem_col: &[G::Scalar], - comm_E_col: &Commitment, - E_col: &[G::Scalar], - transcript: &mut G::TE, - ) -> Result< - ( - MemcheckProof, - Vec<(PolyEvalWitness, PolyEvalInstance)>, - G::Scalar, - Vec, - ), - NovaError, - > { - // we now need to prove that E_row and E_col are well-formed - // we use memory checking: H(INIT) * H(WS) =? H(RS) * H(FINAL) - let gamma_1 = transcript.squeeze(b"g1")?; - let gamma_2 = transcript.squeeze(b"g2")?; - - let gamma_1_sqr = gamma_1 * gamma_1; - let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { - (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 - }; - - let init_row = (0..mem_row.len()) - .map(|i| hash_func(&G::Scalar::from(i as u64), &mem_row[i], &G::Scalar::zero())) - .collect::>(); - let read_row = (0..E_row.len()) - .map(|i| hash_func(&S_repr.row[i], &E_row[i], &S_repr.row_read_ts[i])) - .collect::>(); - let write_row = (0..E_row.len()) - .map(|i| { - hash_func( - &S_repr.row[i], - &E_row[i], - &(S_repr.row_read_ts[i] + G::Scalar::one()), - ) - }) - .collect::>(); - let audit_row = (0..mem_row.len()) - .map(|i| { - hash_func( - &G::Scalar::from(i as u64), - &mem_row[i], - &S_repr.row_audit_ts[i], - ) - }) - .collect::>(); - - let init_col = (0..mem_col.len()) - .map(|i| hash_func(&G::Scalar::from(i as u64), &mem_col[i], &G::Scalar::zero())) - .collect::>(); - let read_col = (0..E_col.len()) - .map(|i| hash_func(&S_repr.col[i], &E_col[i], &S_repr.col_read_ts[i])) - .collect::>(); - let write_col = (0..E_col.len()) - .map(|i| { - hash_func( - &S_repr.col[i], - &E_col[i], - &(S_repr.col_read_ts[i] + G::Scalar::one()), - ) - }) - .collect::>(); - let audit_col = (0..mem_col.len()) - .map(|i| { - hash_func( - &G::Scalar::from(i as u64), - &mem_col[i], - &S_repr.col_audit_ts[i], - ) - }) - .collect::>(); - - let (sc_prod, evals_prod, r_prod, _evals_input_vec, mut w_u_vec) = ProductArgument::prove( - ck, - &[ - init_row, read_row, write_row, audit_row, init_col, read_col, write_col, audit_col, - ], - transcript, - )?; - - // row-related and col-related claims of polynomial evaluations to aid the final check of the sum-check - let evals = [ - &S_repr.row, - &S_repr.row_read_ts, - E_row, - &S_repr.row_audit_ts, - &S_repr.col, - &S_repr.col_read_ts, - E_col, - &S_repr.col_audit_ts, - mem_col, - ] - .into_par_iter() - .map(|p| MultilinearPolynomial::evaluate_with(p, &r_prod)) - .collect::>(); - - let eval_row = evals[0]; - let eval_row_read_ts = evals[1]; - let eval_E_row = evals[2]; - let eval_row_audit_ts = evals[3]; - let eval_col = evals[4]; - let eval_col_read_ts = evals[5]; - let eval_E_col = evals[6]; - let eval_col_audit_ts = evals[7]; - let eval_z = evals[8]; - - // we can batch all the claims - transcript.absorb( - b"e", - &[ - eval_row, - 
eval_row_read_ts, - eval_E_row, - eval_row_audit_ts, - eval_col, - eval_col_read_ts, - eval_E_col, - eval_col_audit_ts, - ] - .as_slice(), - ); - let c = transcript.squeeze(b"c")?; - let eval_joint = eval_row - + c * eval_row_read_ts - + c * c * eval_E_row - + c * c * c * eval_row_audit_ts - + c * c * c * c * eval_col - + c * c * c * c * c * eval_col_read_ts - + c * c * c * c * c * c * eval_E_col - + c * c * c * c * c * c * c * eval_col_audit_ts; - let comm_joint = S_comm.comm_row - + S_comm.comm_row_read_ts * c - + *comm_E_row * c * c - + S_comm.comm_row_audit_ts * c * c * c - + S_comm.comm_col * c * c * c * c - + S_comm.comm_col_read_ts * c * c * c * c * c - + *comm_E_col * c * c * c * c * c * c - + S_comm.comm_col_audit_ts * c * c * c * c * c * c * c; - let poly_joint = S_repr - .row - .iter() - .zip(S_repr.row_read_ts.iter()) - .zip(E_row.iter()) - .zip(S_repr.row_audit_ts.iter()) - .zip(S_repr.col.iter()) - .zip(S_repr.col_read_ts.iter()) - .zip(E_col.iter()) - .zip(S_repr.col_audit_ts.iter()) - .map(|(((((((x, y), z), m), n), q), s), t)| { - *x + c * y - + c * c * z - + c * c * c * m - + c * c * c * c * n - + c * c * c * c * c * q - + c * c * c * c * c * c * s - + c * c * c * c * c * c * c * t - }) - .collect::>(); - - // add the claim to prove for later - w_u_vec.push(( - PolyEvalWitness { p: poly_joint }, - PolyEvalInstance { - c: comm_joint, - x: r_prod.clone(), - e: eval_joint, - }, - )); - - let eval_arg = Self { - eval_init_row: evals_prod[0], - eval_read_row: evals_prod[1], - eval_write_row: evals_prod[2], - eval_audit_row: evals_prod[3], - eval_init_col: evals_prod[4], - eval_read_col: evals_prod[5], - eval_write_col: evals_prod[6], - eval_audit_col: evals_prod[7], - sc_prod, - - eval_row, - eval_row_read_ts, - eval_E_row, - eval_row_audit_ts, - eval_col, - eval_col_read_ts, - eval_E_col, - eval_col_audit_ts, - eval_z, - }; - - Ok((eval_arg, w_u_vec, eval_z, r_prod)) - } - - /// verifies a memory-checking relation - fn verify( - &self, - S_comm: &R1CSShapeSparkCommitment, - comm_E_row: &Commitment, - comm_E_col: &Commitment, - r_x: &[G::Scalar], - transcript: &mut G::TE, - ) -> Result<(Vec>, G::Scalar, Vec), NovaError> { - let r_x_padded = { - let mut x = vec![G::Scalar::zero(); S_comm.N.log_2() - r_x.len()]; - x.extend(r_x); - x - }; - - // verify if E_row and E_col are well formed - let gamma_1 = transcript.squeeze(b"g1")?; - let gamma_2 = transcript.squeeze(b"g2")?; - - // hash function - let gamma_1_sqr = gamma_1 * gamma_1; - let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar { - (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2 - }; - - // check the required multiset relationship - // row - if self.eval_init_row * self.eval_write_row != self.eval_read_row * self.eval_audit_row { - return Err(NovaError::InvalidMultisetProof); - } - // col - if self.eval_init_col * self.eval_write_col != self.eval_read_col * self.eval_audit_col { - return Err(NovaError::InvalidMultisetProof); - } - - // verify the product proofs - let (claims_final, r_prod, mut u_vec) = self.sc_prod.verify( - &[ - self.eval_init_row, - self.eval_read_row, - self.eval_write_row, - self.eval_audit_row, - self.eval_init_col, - self.eval_read_col, - self.eval_write_col, - self.eval_audit_col, - ], - S_comm.N, - transcript, - )?; - - // finish the final step of the sum-check - let (claim_init_expected_row, claim_audit_expected_row) = { - let addr = IdentityPolynomial::new(r_prod.len()).evaluate(&r_prod); - let val = EqPolynomial::new(r_x_padded.to_vec()).evaluate(&r_prod); - ( 
- hash_func(&addr, &val, &G::Scalar::zero()), - hash_func(&addr, &val, &self.eval_row_audit_ts), - ) - }; - - let (claim_read_expected_row, claim_write_expected_row) = { - ( - hash_func(&self.eval_row, &self.eval_E_row, &self.eval_row_read_ts), - hash_func( - &self.eval_row, - &self.eval_E_row, - &(self.eval_row_read_ts + G::Scalar::one()), - ), - ) - }; - - // multiset check for the row - if claim_init_expected_row != claims_final[0] - || claim_read_expected_row != claims_final[1] - || claim_write_expected_row != claims_final[2] - || claim_audit_expected_row != claims_final[3] - { - return Err(NovaError::InvalidSumcheckProof); - } - - let (claim_init_expected_col, claim_audit_expected_col) = { - let addr = IdentityPolynomial::new(r_prod.len()).evaluate(&r_prod); - let val = self.eval_z; // this value is later checked against U.comm_W and u.X - ( - hash_func(&addr, &val, &G::Scalar::zero()), - hash_func(&addr, &val, &self.eval_col_audit_ts), - ) - }; - - let (claim_read_expected_col, claim_write_expected_col) = { - ( - hash_func(&self.eval_col, &self.eval_E_col, &self.eval_col_read_ts), - hash_func( - &self.eval_col, - &self.eval_E_col, - &(self.eval_col_read_ts + G::Scalar::one()), - ), - ) - }; - - // multiset check for the col - if claim_init_expected_col != claims_final[4] - || claim_read_expected_col != claims_final[5] - || claim_write_expected_col != claims_final[6] - || claim_audit_expected_col != claims_final[7] - { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb( - b"e", - &[ - self.eval_row, - self.eval_row_read_ts, - self.eval_E_row, - self.eval_row_audit_ts, - self.eval_col, - self.eval_col_read_ts, - self.eval_E_col, - self.eval_col_audit_ts, - ] - .as_slice(), - ); - let c = transcript.squeeze(b"c")?; - let eval_joint = self.eval_row - + c * self.eval_row_read_ts - + c * c * self.eval_E_row - + c * c * c * self.eval_row_audit_ts - + c * c * c * c * self.eval_col - + c * c * c * c * c * self.eval_col_read_ts - + c * c * c * c * c * c * self.eval_E_col - + c * c * c * c * c * c * c * self.eval_col_audit_ts; - let comm_joint = S_comm.comm_row - + S_comm.comm_row_read_ts * c - + *comm_E_row * c * c - + S_comm.comm_row_audit_ts * c * c * c - + S_comm.comm_col * c * c * c * c - + S_comm.comm_col_read_ts * c * c * c * c * c - + *comm_E_col * c * c * c * c * c * c - + S_comm.comm_col_audit_ts * c * c * c * c * c * c * c; - - u_vec.push(PolyEvalInstance { - c: comm_joint, - x: r_prod.clone(), - e: eval_joint, - }); - - Ok((u_vec, self.eval_z, r_prod)) - } -} - -/// A type that represents the prover's key -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey> { - pk_ee: EE::ProverKey, - S: R1CSShape, - S_repr: R1CSShapeSparkRepr, - S_comm: R1CSShapeSparkCommitment, -} - -/// A type that represents the verifier's key -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - num_cons: usize, - num_vars: usize, - vk_ee: EE::VerifierKey, - S_comm: R1CSShapeSparkCommitment, -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - // outer sum-check - sc_proof_outer: SumcheckProof, - - // claims from the end of the outer sum-check - eval_Az: G::Scalar, - eval_Bz: G::Scalar, - eval_Cz: G::Scalar, - eval_E: G::Scalar, - - // commitment to 
oracles for the inner sum-check - comm_E_row: Commitment, - comm_E_col: Commitment, - - // inner sum-check - sc_proof_inner: SumcheckProof, - - // claims from the end of inner sum-check - eval_E_row: G::Scalar, - eval_E_col: G::Scalar, - eval_val_A: G::Scalar, - eval_val_B: G::Scalar, - eval_val_C: G::Scalar, - - // memory-checking proof - mc_proof: MemcheckProof, - - // claim about W evaluation - eval_W: G::Scalar, - - // batch openings of all multilinear polynomials - sc_proof_batch: SumcheckProof, - evals_batch: Vec, - eval_arg: EE::EvaluationArgument, -} - -impl> RelaxedR1CSSNARKTrait - for RelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn setup( - ck: &CommitmentKey, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - let (pk_ee, vk_ee) = EE::setup(ck); - - // pad the R1CS matrices - let S = S.pad(); - - let S_repr = R1CSShapeSparkRepr::new(&S); - let S_comm = S_repr.commit(ck); - - let vk = VerifierKey { - num_cons: S.num_cons, - num_vars: S.num_vars, - S_comm: S_comm.clone(), - vk_ee, - }; - - let pk = ProverKey { - pk_ee, - S, - S_repr, - S_comm, - }; - - Ok((pk, vk)) - } - - /// produces a succinct proof of satisfiability of a RelaxedR1CS instance - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - let W = W.pad(&pk.S); // pad the witness - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); - - // a list of polynomial evaluation claims that will be batched - let mut w_u_vec = Vec::new(); - - // sanity check that R1CSShape has certain size characteristics - assert_eq!(pk.S.num_cons.next_power_of_two(), pk.S.num_cons); - assert_eq!(pk.S.num_vars.next_power_of_two(), pk.S.num_vars); - assert_eq!(pk.S.num_io.next_power_of_two(), pk.S.num_io); - assert!(pk.S.num_io < pk.S.num_vars); - - // append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript - transcript.absorb(b"C", &pk.S_comm); - transcript.absorb(b"U", U); - - // compute the full satisfying assignment by concatenating W.W, U.u, and U.X - let z = concat(vec![W.W.clone(), vec![U.u], U.X.clone()]); - - let (num_rounds_x, _num_rounds_y) = ( - (pk.S.num_cons as f64).log2() as usize, - ((pk.S.num_vars as f64).log2() as usize + 1), - ); - - // outer sum-check - let tau = (0..num_rounds_x) - .map(|_i| transcript.squeeze(b"t")) - .collect::, NovaError>>()?; - - let mut poly_tau = MultilinearPolynomial::new(EqPolynomial::new(tau).evals()); - let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { - let (poly_Az, poly_Bz, poly_Cz) = pk.S.multiply_vec(&z)?; - let poly_uCz_E = (0..pk.S.num_cons) - .map(|i| U.u * poly_Cz[i] + W.E[i]) - .collect::>(); - ( - MultilinearPolynomial::new(poly_Az), - MultilinearPolynomial::new(poly_Bz), - MultilinearPolynomial::new(poly_Cz), - MultilinearPolynomial::new(poly_uCz_E), - ) - }; - - let comb_func_outer = - |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; - let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( - &G::Scalar::zero(), // claim is zero - num_rounds_x, - &mut poly_tau, - &mut poly_Az, - &mut poly_Bz, - &mut poly_uCz_E, - comb_func_outer, - &mut transcript, - )?; - - // claims from the end of sum-check - let (eval_Az, eval_Bz): (G::Scalar, G::Scalar) = (claims_outer[1], claims_outer[2]); - let eval_Cz = poly_Cz.evaluate(&r_x); - let eval_E = 
MultilinearPolynomial::new(W.E.clone()).evaluate(&r_x); - transcript.absorb(b"o", &[eval_Az, eval_Bz, eval_Cz, eval_E].as_slice()); - - // add claim about eval_E to be proven - w_u_vec.push(( - PolyEvalWitness { p: W.E.clone() }, - PolyEvalInstance { - c: U.comm_E, - x: r_x.clone(), - e: eval_E, - }, - )); - - // send oracles to aid the inner sum-check - // E_row(i) = eq(r_x, row(i)) for all i - // E_col(i) = z(col(i)) for all i - let (mem_row, z, E_row, E_col) = pk.S_repr.evaluation_oracles(&pk.S, &r_x, &z); - let (comm_E_row, comm_E_col) = - rayon::join(|| G::CE::commit(ck, &E_row), || G::CE::commit(ck, &E_col)); - - // add E_row and E_col to transcript - transcript.absorb(b"e", &vec![comm_E_row, comm_E_col].as_slice()); - - let r = transcript.squeeze(b"r")?; - let val = pk - .S_repr - .val_A - .iter() - .zip(pk.S_repr.val_B.iter()) - .zip(pk.S_repr.val_C.iter()) - .map(|((a, b), c)| *a + r * *b + r * r * *c) - .collect::>(); - - // inner sum-check - let claim_inner_joint = eval_Az + r * eval_Bz + r * r * eval_Cz; - let num_rounds_y = pk.S_repr.N.log_2(); - let comb_func = |poly_A_comp: &G::Scalar, - poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - - debug_assert_eq!( - E_row - .iter() - .zip(val.iter()) - .zip(E_col.iter()) - .map(|((a, b), c)| *a * *b * *c) - .fold(G::Scalar::zero(), |acc, item| acc + item), - claim_inner_joint - ); - - let (sc_proof_inner, r_y, claims_inner) = SumcheckProof::prove_cubic( - &claim_inner_joint, - num_rounds_y, - &mut MultilinearPolynomial::new(E_row.clone()), - &mut MultilinearPolynomial::new(E_col.clone()), - &mut MultilinearPolynomial::new(val), - comb_func, - &mut transcript, - )?; - - let eval_E_row = claims_inner[0]; - let eval_E_col = claims_inner[1]; - let eval_val_A = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_A, &r_y); - let eval_val_B = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_B, &r_y); - let eval_val_C = MultilinearPolynomial::evaluate_with(&pk.S_repr.val_C, &r_y); - - // since all the five polynomials are opened at r_y, - // we can combine them into a single polynomial opened at r_y - transcript.absorb( - b"e", - &[eval_E_row, eval_E_col, eval_val_A, eval_val_B, eval_val_C].as_slice(), - ); - let c = transcript.squeeze(b"c")?; - let eval_sc_inner = eval_E_row - + c * eval_E_col - + c * c * eval_val_A - + c * c * c * eval_val_B - + c * c * c * c * eval_val_C; - let comm_sc_inner = comm_E_row - + comm_E_col * c - + pk.S_comm.comm_val_A * c * c - + pk.S_comm.comm_val_B * c * c * c - + pk.S_comm.comm_val_C * c * c * c * c; - let poly_sc_inner = E_row - .iter() - .zip(E_col.iter()) - .zip(pk.S_repr.val_A.iter()) - .zip(pk.S_repr.val_B.iter()) - .zip(pk.S_repr.val_C.iter()) - .map(|((((x, y), z), m), n)| *x + c * y + c * c * z + c * c * c * m + c * c * c * c * n) - .collect::>(); - - w_u_vec.push(( - PolyEvalWitness { p: poly_sc_inner }, - PolyEvalInstance { - c: comm_sc_inner, - x: r_y, - e: eval_sc_inner, - }, - )); - - // we need to prove that E_row and E_col are well-formed - let (mc_proof, w_u_vec_mem, _eval_z, r_prod) = MemcheckProof::prove( - ck, - &pk.S_repr, - &pk.S_comm, - &mem_row, - &comm_E_row, - &E_row, - &z, - &comm_E_col, - &E_col, - &mut transcript, - )?; - - // add claims from memory-checking - w_u_vec.extend(w_u_vec_mem); - - // we need to prove that eval_z = z(r_prod) = (1-r_prod[0]) * W.w(r_prod[1..]) + r_prod[0] * U.x(r_prod[1..]). 
- // r_prod was padded, so we now remove the padding - let r_prod_unpad = { - let l = pk.S_repr.N.log_2() - (2 * pk.S.num_vars).log_2(); - r_prod[l..].to_vec() - }; - - let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_prod_unpad[1..]); - w_u_vec.push(( - PolyEvalWitness { p: W.W }, - PolyEvalInstance { - c: U.comm_W, - x: r_prod_unpad[1..].to_vec(), - e: eval_W, - }, - )); - - // We will now reduce a vector of claims of evaluations at different points into claims about them at the same point. - // For example, eval_W =? W(r_y[1..]) and eval_W =? E(r_x) into - // two claims: eval_W_prime =? W(rz) and eval_E_prime =? E(rz) - // We can them combine the two into one: eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), - // where gamma is a public challenge - // Since commitments to W and E are homomorphic, the verifier can compute a commitment - // to the batched polynomial. - assert!(w_u_vec.len() >= 2); - - let (w_vec, u_vec): (Vec>, Vec>) = - w_u_vec.into_iter().unzip(); - let w_vec_padded = PolyEvalWitness::pad(&w_vec); // pad the polynomials to be of the same size - let u_vec_padded = PolyEvalInstance::pad(&u_vec); // pad the evaluation points - - let powers = |s: &G::Scalar, n: usize| -> Vec { - assert!(n >= 1); - let mut powers = Vec::new(); - powers.push(G::Scalar::one()); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers - }; - - // generate a challenge - let rho = transcript.squeeze(b"r")?; - let num_claims = w_vec_padded.len(); - let powers_of_rho = powers(&rho, num_claims); - let claim_batch_joint = u_vec_padded - .iter() - .zip(powers_of_rho.iter()) - .map(|(u, p)| u.e * p) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let mut polys_left: Vec> = w_vec_padded - .iter() - .map(|w| MultilinearPolynomial::new(w.p.clone())) - .collect(); - let mut polys_right: Vec> = u_vec_padded - .iter() - .map(|u| MultilinearPolynomial::new(EqPolynomial::new(u.x.clone()).evals())) - .collect(); - - let num_rounds_z = u_vec_padded[0].x.len(); - let comb_func = |poly_A_comp: &G::Scalar, poly_B_comp: &G::Scalar| -> G::Scalar { - *poly_A_comp * *poly_B_comp - }; - let (sc_proof_batch, r_z, claims_batch) = SumcheckProof::prove_quad_batch( - &claim_batch_joint, - num_rounds_z, - &mut polys_left, - &mut polys_right, - &powers_of_rho, - comb_func, - &mut transcript, - )?; - - let (claims_batch_left, _): (Vec, Vec) = claims_batch; - - transcript.absorb(b"l", &claims_batch_left.as_slice()); - - // we now combine evaluation claims at the same point rz into one - let gamma = transcript.squeeze(b"g")?; - let powers_of_gamma: Vec = powers(&gamma, num_claims); - let comm_joint = u_vec_padded - .iter() - .zip(powers_of_gamma.iter()) - .map(|(u, g_i)| u.c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); - let poly_joint = PolyEvalWitness::weighted_sum(&w_vec_padded, &powers_of_gamma); - let eval_joint = claims_batch_left - .iter() - .zip(powers_of_gamma.iter()) - .map(|(e, g_i)| *e * *g_i) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &comm_joint, - &poly_joint.p, - &r_z, - &eval_joint, - )?; - - Ok(RelaxedR1CSSNARK { - sc_proof_outer, - eval_Az, - eval_Bz, - eval_Cz, - eval_E, - comm_E_row, - comm_E_col, - sc_proof_inner, - eval_E_row, - eval_val_A, - eval_val_B, - eval_val_C, - eval_E_col, - - mc_proof, - - eval_W, - - sc_proof_batch, - evals_batch: claims_batch_left, - eval_arg, - }) - } - - /// verifies a proof of satisfiability of a RelaxedR1CS instance - fn verify(&self, vk: 
&Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = G::TE::new(b"RelaxedR1CSSNARK"); - let mut u_vec: Vec> = Vec::new(); - - // append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript - transcript.absorb(b"C", &vk.S_comm); - transcript.absorb(b"U", U); - - let num_rounds_x = (vk.num_cons as f64).log2() as usize; - - // outer sum-check - let tau = (0..num_rounds_x) - .map(|_i| transcript.squeeze(b"t")) - .collect::, NovaError>>()?; - - let (claim_outer_final, r_x) = - self - .sc_proof_outer - .verify(G::Scalar::zero(), num_rounds_x, 3, &mut transcript)?; - - // verify claim_outer_final - let taus_bound_rx = EqPolynomial::new(tau).evaluate(&r_x); - let claim_outer_final_expected = - taus_bound_rx * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E); - if claim_outer_final != claim_outer_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // absorb the claim about eval_E to be checked later - u_vec.push(PolyEvalInstance { - c: U.comm_E, - x: r_x.clone(), - e: self.eval_E, - }); - - transcript.absorb( - b"o", - &[self.eval_Az, self.eval_Bz, self.eval_Cz, self.eval_E].as_slice(), - ); - - // add claimed oracles - transcript.absorb(b"e", &vec![self.comm_E_row, self.comm_E_col].as_slice()); - - // inner sum-check - let r = transcript.squeeze(b"r")?; - let claim_inner_joint = self.eval_Az + r * self.eval_Bz + r * r * self.eval_Cz; - let num_rounds_y = vk.S_comm.N.log_2(); - - let (claim_inner_final, r_y) = - self - .sc_proof_inner - .verify(claim_inner_joint, num_rounds_y, 3, &mut transcript)?; - - // verify claim_inner_final - let claim_inner_final_expected = self.eval_E_row - * self.eval_E_col - * (self.eval_val_A + r * self.eval_val_B + r * r * self.eval_val_C); - if claim_inner_final != claim_inner_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // add claims about five polynomials used at the end of the inner sum-check - // since they are all evaluated at r_y, we can batch them into one - transcript.absorb( - b"e", - &[ - self.eval_E_row, - self.eval_E_col, - self.eval_val_A, - self.eval_val_B, - self.eval_val_C, - ] - .as_slice(), - ); - let c = transcript.squeeze(b"c")?; - let eval_sc_inner = self.eval_E_row - + c * self.eval_E_col - + c * c * self.eval_val_A - + c * c * c * self.eval_val_B - + c * c * c * c * self.eval_val_C; - let comm_sc_inner = self.comm_E_row - + self.comm_E_col * c - + vk.S_comm.comm_val_A * c * c - + vk.S_comm.comm_val_B * c * c * c - + vk.S_comm.comm_val_C * c * c * c * c; - - u_vec.push(PolyEvalInstance { - c: comm_sc_inner, - x: r_y, - e: eval_sc_inner, - }); - - let (u_vec_mem, eval_Z, r_prod) = self.mc_proof.verify( - &vk.S_comm, - &self.comm_E_row, - &self.comm_E_col, - &r_x, - &mut transcript, - )?; - - u_vec.extend(u_vec_mem); - - // we verify that eval_z = z(r_prod) = (1-r_prod[0]) * W.w(r_prod[1..]) + r_prod[0] * U.x(r_prod[1..]). 
- let (eval_Z_expected, r_prod_unpad) = { - // r_prod was padded, so we now remove the padding - let (factor, r_prod_unpad) = { - let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); - - let mut factor = G::Scalar::one(); - for r_p in r_prod.iter().take(l) { - factor *= G::Scalar::one() - r_p - } - - (factor, r_prod[l..].to_vec()) - }; - - let eval_X = { - // constant term - let mut poly_X = vec![(0, U.u)]; - //remaining inputs - poly_X.extend( - (0..U.X.len()) - .map(|i| (i + 1, U.X[i])) - .collect::>(), - ); - SparsePolynomial::new((vk.num_vars as f64).log2() as usize, poly_X) - .evaluate(&r_prod_unpad[1..]) - }; - let eval_Z = - factor * ((G::Scalar::one() - r_prod_unpad[0]) * self.eval_W + r_prod_unpad[0] * eval_X); - - (eval_Z, r_prod_unpad) - }; - - if eval_Z != eval_Z_expected { - return Err(NovaError::IncorrectWitness); - } - - u_vec.push(PolyEvalInstance { - c: U.comm_W, - x: r_prod_unpad[1..].to_vec(), - e: self.eval_W, - }); - - let u_vec_padded = PolyEvalInstance::pad(&u_vec); // pad the evaluation points - - let powers = |s: &G::Scalar, n: usize| -> Vec { - assert!(n >= 1); - let mut powers = Vec::new(); - powers.push(G::Scalar::one()); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers - }; - - // generate a challenge - let rho = transcript.squeeze(b"r")?; - let num_claims = u_vec.len(); - let powers_of_rho = powers(&rho, num_claims); - let claim_batch_joint = u_vec - .iter() - .zip(powers_of_rho.iter()) - .map(|(u, p)| u.e * p) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let num_rounds_z = u_vec_padded[0].x.len(); - let (claim_batch_final, r_z) = - self - .sc_proof_batch - .verify(claim_batch_joint, num_rounds_z, 2, &mut transcript)?; - - let claim_batch_final_expected = { - let poly_rz = EqPolynomial::new(r_z.clone()); - let evals = u_vec_padded - .iter() - .map(|u| poly_rz.evaluate(&u.x)) - .collect::>(); - - evals - .iter() - .zip(self.evals_batch.iter()) - .zip(powers_of_rho.iter()) - .map(|((e_i, p_i), rho_i)| *e_i * *p_i * rho_i) - .fold(G::Scalar::zero(), |acc, item| acc + item) - }; - - if claim_batch_final != claim_batch_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb(b"l", &self.evals_batch.as_slice()); - - // we now combine evaluation claims at the same point rz into one - let gamma = transcript.squeeze(b"g")?; - let powers_of_gamma: Vec = powers(&gamma, num_claims); - let comm_joint = u_vec_padded - .iter() - .zip(powers_of_gamma.iter()) - .map(|(u, g_i)| u.c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); - let eval_joint = self - .evals_batch - .iter() - .zip(powers_of_gamma.iter()) - .map(|(e, g_i)| *e * *g_i) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - // verify - EE::verify( - &vk.vk_ee, - &mut transcript, - &comm_joint, - &r_z, - &eval_joint, - &self.eval_arg, - )?; - - Ok(()) - } -} - -// provides direct interfaces to call the SNARK implemented in this module -struct SpartanCircuit> { - z_i: Option>, // inputs to the circuit - sc: SC, // step circuit to be executed -} - -impl> Circuit for SpartanCircuit { - fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { - // obtain the arity information - let arity = self.sc.arity(); - - // Allocate zi. 
If inputs.zi is not provided, allocate default value 0 - let zero = vec![G::Scalar::zero(); arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.z_i.as_ref().unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - let z_i_plus_one = self.sc.synthesize(&mut cs.namespace(|| "F"), &z_i)?; - - // inputize both z_i and z_i_plus_one - for (j, input) in z_i.iter().enumerate().take(arity) { - let _ = input.inputize(cs.namespace(|| format!("input {j}"))); - } - for (j, output) in z_i_plus_one.iter().enumerate().take(arity) { - let _ = output.inputize(cs.namespace(|| format!("output {j}"))); - } - - Ok(()) - } -} - -/// A type that holds Spartan's prover key -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct SpartanProverKey -where - G: Group, - EE: EvaluationEngineTrait, -{ - F_arity: usize, - S: R1CSShape, - ck: CommitmentKey, - pk: ProverKey, -} - -/// A type that holds Spartan's verifier key -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct SpartanVerifierKey -where - G: Group, - EE: EvaluationEngineTrait, -{ - F_arity: usize, - vk: VerifierKey, -} - -/// A direct SNARK proving a step circuit -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct SpartanSNARK -where - G: Group, - EE: EvaluationEngineTrait, - C: StepCircuit, -{ - comm_W: Commitment, // commitment to the witness - snark: RelaxedR1CSSNARK, // snark proving the witness is satisfying - _p: PhantomData, -} - -impl, C: StepCircuit> - SpartanSNARK -{ - /// Produces prover and verifier keys for Spartan - pub fn setup(sc: C) -> Result<(SpartanProverKey, SpartanVerifierKey), NovaError> { - let F_arity = sc.arity(); - - // construct a circuit that can be synthesized - let circuit: SpartanCircuit = SpartanCircuit { z_i: None, sc }; - - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit.synthesize(&mut cs); - let (S, ck) = cs.r1cs_shape(); - - let (pk, vk) = RelaxedR1CSSNARK::setup(&ck, &S)?; - - let pk = SpartanProverKey { F_arity, S, ck, pk }; - - let vk = SpartanVerifierKey { F_arity, vk }; - - Ok((pk, vk)) - } - - /// Produces a proof of satisfiability of the provided circuit - pub fn prove( - pk: &SpartanProverKey, - sc: C, - z_i: Vec, - ) -> Result { - if z_i.len() != pk.F_arity || sc.output(&z_i).len() != pk.F_arity { - return Err(NovaError::InvalidInitialInputLength); - } - - let mut cs: SatisfyingAssignment = SatisfyingAssignment::new(); - - let circuit: SpartanCircuit = SpartanCircuit { z_i: Some(z_i), sc }; - - let _ = circuit.synthesize(&mut cs); - let (u, w) = cs - .r1cs_instance_and_witness(&pk.S, &pk.ck) - .map_err(|_e| NovaError::UnSat)?; - - // convert the instance and witness to relaxed form - let (u_relaxed, w_relaxed) = ( - RelaxedR1CSInstance::from_r1cs_instance_unchecked(&u.comm_W, &u.X), - RelaxedR1CSWitness::from_r1cs_witness(&pk.S, &w), - ); - - // prove the instance using Spartan - let snark = RelaxedR1CSSNARK::prove(&pk.ck, &pk.pk, &u_relaxed, &w_relaxed)?; - - Ok(SpartanSNARK { - comm_W: u.comm_W, - snark, - _p: Default::default(), - }) - } - - /// Verifies a proof of satisfiability - pub fn verify( - &self, - vk: &SpartanVerifierKey, - z_i: Vec, - z_i_plus_one: Vec, - ) -> Result<(), NovaError> { - // check if z_i and z_i_plus_one have lengths according to the provided arity - if z_i.len() != vk.F_arity || z_i_plus_one.len() != vk.F_arity { - return Err(NovaError::InvalidInitialInputLength); - } - - // construct an instance using the provided commitment to the witness and 
z_i and z_{i+1} - let u_relaxed = RelaxedR1CSInstance::from_r1cs_instance_unchecked( - &self.comm_W, - &z_i - .into_iter() - .chain(z_i_plus_one.into_iter()) - .collect::>(), - ); - - // verify the snark using the constructed instance - self.snark.verify(&vk.vk, &u_relaxed)?; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - type G = pasta_curves::pallas::Point; - type EE = crate::provider::ipa_pc::EvaluationEngine; - use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; - use core::marker::PhantomData; - use ff::PrimeField; - - #[derive(Clone, Debug, Default)] - struct CubicCircuit { - _p: PhantomData, - } - - impl StepCircuit for CubicCircuit - where - F: PrimeField, - { - fn arity(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - z: &[AllocatedNum], - ) -> Result>, SynthesisError> { - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are respectively the input and output. - let x = &z[0]; - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) - })?; - - cs.enforce( - || "y = x^3 + x + 5", - |lc| { - lc + x_cu.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - Ok(vec![y]) - } - - fn output(&self, z: &[F]) -> Vec { - vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] - } - } - - #[test] - fn test_spartan_snark() { - let circuit = CubicCircuit::default(); - - // produce keys - let (pk, vk) = - SpartanSNARK::::Scalar>>::setup(circuit.clone()).unwrap(); - - let num_steps = 3; - - // setup inputs - let z0 = vec![::Scalar::zero()]; - let mut z_i = z0; - - for _i in 0..num_steps { - // produce a SNARK - let res = SpartanSNARK::prove(&pk, circuit.clone(), z_i.clone()); - assert!(res.is_ok()); - - let z_i_plus_one = circuit.output(&z_i); - - let snark = res.unwrap(); - - // verify the SNARK - let res = snark.verify(&vk, z_i.clone(), z_i_plus_one.clone()); - assert!(res.is_ok()); - - // set input to the next step - z_i = z_i_plus_one.clone(); - } - - // sanity: check the claimed output with a direct computation of the same - assert_eq!(z_i, vec![::Scalar::from(2460515u64)]); - } -} diff --git a/src/spartan/pp/product.rs b/src/spartan/pp/product.rs deleted file mode 100644 index c3f774d..0000000 --- a/src/spartan/pp/product.rs +++ /dev/null @@ -1,629 +0,0 @@ -use crate::{ - errors::NovaError, - spartan::{ - math::Math, - polynomial::{EqPolynomial, MultilinearPolynomial}, - sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly}, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{commitment::CommitmentEngineTrait, Group, TranscriptEngineTrait}, - Commitment, CommitmentKey, -}; -use core::marker::PhantomData; -use ff::{Field, PrimeField}; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -pub(crate) struct IdentityPolynomial { - ell: usize, - _p: PhantomData, -} - -impl IdentityPolynomial { - pub fn new(ell: usize) -> Self { - IdentityPolynomial { - ell, - _p: Default::default(), - } - } - - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.ell, r.len()); - (0..self.ell) - .map(|i| Scalar::from(2_usize.pow((self.ell - i - 1) as u32) as u64) * r[i]) - .fold(Scalar::zero(), |acc, item| acc + item) - } -} - -impl SumcheckProof { - pub fn prove_cubic_with_additive_term_batched( - claim: &G::Scalar, - 
num_rounds: usize, - poly_vec: ( - &mut MultilinearPolynomial, - &mut Vec>, - &mut Vec>, - &mut Vec>, - ), - coeffs: &[G::Scalar], - comb_func: F, - transcript: &mut G::TE, - ) -> Result< - ( - Self, - Vec, - (G::Scalar, Vec, Vec, Vec), - ), - NovaError, - > - where - F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar, - { - let (poly_A, poly_B_vec, poly_C_vec, poly_D_vec) = poly_vec; - - let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); - - for _j in 0..num_rounds { - let mut evals: Vec<(G::Scalar, G::Scalar, G::Scalar)> = Vec::new(); - - for ((poly_B, poly_C), poly_D) in poly_B_vec - .iter() - .zip(poly_C_vec.iter()) - .zip(poly_D_vec.iter()) - { - let mut eval_point_0 = G::Scalar::zero(); - let mut eval_point_2 = G::Scalar::zero(); - let mut eval_point_3 = G::Scalar::zero(); - - let len = poly_A.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; - - eval_point_2 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; - - eval_point_3 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - } - - evals.push((eval_point_0, eval_point_2, eval_point_3)); - } - - let evals_combined_0 = (0..evals.len()) - .map(|i| evals[i].0 * coeffs[i]) - .fold(G::Scalar::zero(), |acc, item| acc + item); - let evals_combined_2 = (0..evals.len()) - .map(|i| evals[i].1 * coeffs[i]) - .fold(G::Scalar::zero(), |acc, item| acc + item); - let evals_combined_3 = (0..evals.len()) - .map(|i| evals[i].2 * coeffs[i]) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let evals = vec![ - evals_combined_0, - e - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - // bound all tables to the verifier's challenege - poly_A.bound_poly_var_top(&r_i); - for ((poly_B, poly_C), poly_D) in poly_B_vec - .iter_mut() - .zip(poly_C_vec.iter_mut()) - .zip(poly_D_vec.iter_mut()) - { - poly_B.bound_poly_var_top(&r_i); - poly_C.bound_poly_var_top(&r_i); - poly_D.bound_poly_var_top(&r_i); - } - - e = poly.evaluate(&r_i); - cubic_polys.push(poly.compress()); - } - - let poly_B_final = (0..poly_B_vec.len()).map(|i| poly_B_vec[i][0]).collect(); - let poly_C_final = (0..poly_C_vec.len()).map(|i| poly_C_vec[i][0]).collect(); - let poly_D_final = (0..poly_D_vec.len()).map(|i| poly_D_vec[i][0]).collect(); - let claims_prod = (poly_A[0], poly_B_final, poly_C_final, poly_D_final); - - 
Ok((SumcheckProof::new(cubic_polys), r, claims_prod)) - } -} - -/// Provides a product argument using the algorithm described by Setty-Lee, 2020 -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProductArgument { - comm_output_vec: Vec>, - sc_proof: SumcheckProof, - eval_left_vec: Vec, - eval_right_vec: Vec, - eval_output_vec: Vec, - eval_input_vec: Vec, - eval_output2_vec: Vec, -} - -impl ProductArgument { - pub fn prove( - ck: &CommitmentKey, - input_vec: &[Vec], // a commitment to the input and the input vector to multiplied together - transcript: &mut G::TE, - ) -> Result< - ( - Self, - Vec, - Vec, - Vec, - Vec<(PolyEvalWitness, PolyEvalInstance)>, - ), - NovaError, - > { - let num_claims = input_vec.len(); - - let compute_layer = |input: &[G::Scalar]| -> (Vec, Vec, Vec) { - let left = (0..input.len() / 2) - .map(|i| input[2 * i]) - .collect::>(); - - let right = (0..input.len() / 2) - .map(|i| input[2 * i + 1]) - .collect::>(); - - assert_eq!(left.len(), right.len()); - - let output = (0..left.len()) - .map(|i| left[i] * right[i]) - .collect::>(); - - (left, right, output) - }; - - // a closure that returns left, right, output, product - let prepare_inputs = - |input: &[G::Scalar]| -> (Vec, Vec, Vec, G::Scalar) { - let mut left: Vec = Vec::new(); - let mut right: Vec = Vec::new(); - let mut output: Vec = Vec::new(); - - let mut out = input.to_vec(); - for _i in 0..input.len().log_2() { - let (l, r, o) = compute_layer(&out); - out = o.clone(); - - left.extend(l); - right.extend(r); - output.extend(o); - } - - // add a dummy product operation to make the left.len() == right.len() == output.len() == input.len() - left.push(output[output.len() - 1]); - right.push(G::Scalar::zero()); - output.push(G::Scalar::zero()); - - // output is stored at the last but one position - let product = output[output.len() - 2]; - - assert_eq!(left.len(), right.len()); - assert_eq!(left.len(), output.len()); - (left, right, output, product) - }; - - let mut left_vec = Vec::new(); - let mut right_vec = Vec::new(); - let mut output_vec = Vec::new(); - let mut prod_vec = Vec::new(); - for input in input_vec { - let (l, r, o, p) = prepare_inputs(input); - left_vec.push(l); - right_vec.push(r); - output_vec.push(o); - prod_vec.push(p); - } - - // commit to the outputs - let comm_output_vec = (0..output_vec.len()) - .into_par_iter() - .map(|i| G::CE::commit(ck, &output_vec[i])) - .collect::>(); - - // absorb the output commitment and the claimed product - transcript.absorb(b"o", &comm_output_vec.as_slice()); - transcript.absorb(b"r", &prod_vec.as_slice()); - - // this assumes all vectors passed have the same length - let num_rounds = output_vec[0].len().log_2(); - - // produce a fresh set of coeffs and a joint claim - let coeff_vec = { - let s = transcript.squeeze(b"r")?; - let mut s_vec = vec![s]; - for i in 1..num_claims { - s_vec.push(s_vec[i - 1] * s); - } - s_vec - }; - - // generate randomness for the eq polynomial - let rand_eq = (0..num_rounds) - .map(|_i| transcript.squeeze(b"e")) - .collect::, NovaError>>()?; - - let mut poly_A = MultilinearPolynomial::new(EqPolynomial::new(rand_eq).evals()); - let mut poly_B_vec = left_vec - .clone() - .into_par_iter() - .map(MultilinearPolynomial::new) - .collect::>(); - let mut poly_C_vec = right_vec - .clone() - .into_par_iter() - .map(MultilinearPolynomial::new) - .collect::>(); - let mut poly_D_vec = output_vec - .clone() - .into_par_iter() - .map(MultilinearPolynomial::new) - .collect::>(); - - let comb_func = - |poly_A_comp: &G::Scalar, 
- poly_B_comp: &G::Scalar, - poly_C_comp: &G::Scalar, - poly_D_comp: &G::Scalar| - -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; - - let (sc_proof, rand, _claims) = SumcheckProof::prove_cubic_with_additive_term_batched( - &G::Scalar::zero(), - num_rounds, - ( - &mut poly_A, - &mut poly_B_vec, - &mut poly_C_vec, - &mut poly_D_vec, - ), - &coeff_vec, - comb_func, - transcript, - )?; - - // claims[0] is about the Eq polynomial, which the verifier computes directly - // claims[1] =? weighed sum of left(rand) - // claims[2] =? weighted sum of right(rand) - // claims[3] =? weighetd sum of output(rand), which is easy to verify by querying output - // we also need to prove that output(output.len()-2) = claimed_product - let eval_left_vec = (0..left_vec.len()) - .into_par_iter() - .map(|i| MultilinearPolynomial::evaluate_with(&left_vec[i], &rand)) - .collect::>(); - let eval_right_vec = (0..right_vec.len()) - .into_par_iter() - .map(|i| MultilinearPolynomial::evaluate_with(&right_vec[i], &rand)) - .collect::>(); - let eval_output_vec = (0..output_vec.len()) - .into_par_iter() - .map(|i| MultilinearPolynomial::evaluate_with(&output_vec[i], &rand)) - .collect::>(); - - // we now combine eval_left = left(rand) and eval_right = right(rand) - // into claims about input and output - transcript.absorb(b"l", &eval_left_vec.as_slice()); - transcript.absorb(b"r", &eval_right_vec.as_slice()); - transcript.absorb(b"o", &eval_output_vec.as_slice()); - - let c = transcript.squeeze(b"c")?; - - // eval = (G::Scalar::one() - c) * eval_left + c * eval_right - // eval is claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c) - let rand_ext = { - let mut r = rand.clone(); - r.extend(&[c]); - r - }; - let eval_input_vec = (0..input_vec.len()) - .into_par_iter() - .map(|i| MultilinearPolynomial::evaluate_with(&input_vec[i], &rand_ext[1..])) - .collect::>(); - - let eval_output2_vec = (0..output_vec.len()) - .into_par_iter() - .map(|i| MultilinearPolynomial::evaluate_with(&output_vec[i], &rand_ext[1..])) - .collect::>(); - - // add claimed evaluations to the transcript - transcript.absorb(b"i", &eval_input_vec.as_slice()); - transcript.absorb(b"o", &eval_output2_vec.as_slice()); - - // squeeze a challenge to combine multiple claims into one - let powers_of_rho = { - let s = transcript.squeeze(b"r")?; - let mut s_vec = vec![s]; - for i in 1..num_claims { - s_vec.push(s_vec[i - 1] * s); - } - s_vec - }; - - // take weighted sum of input, output, and their commitments - let product = prod_vec - .iter() - .zip(powers_of_rho.iter()) - .map(|(e, p)| *e * p) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let eval_output = eval_output_vec - .iter() - .zip(powers_of_rho.iter()) - .map(|(e, p)| *e * p) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let comm_output = comm_output_vec - .iter() - .zip(powers_of_rho.iter()) - .map(|(c, r_i)| *c * *r_i) - .fold(Commitment::::default(), |acc, item| acc + item); - - let weighted_sum = |W: &[Vec], s: &[G::Scalar]| -> Vec { - assert_eq!(W.len(), s.len()); - let mut p = vec![G::Scalar::zero(); W[0].len()]; - for i in 0..W.len() { - for (j, item) in W[i].iter().enumerate().take(W[i].len()) { - p[j] += *item * s[i] - } - } - p - }; - - let poly_output = weighted_sum(&output_vec, &powers_of_rho); - - let eval_output2 = eval_output2_vec - .iter() - .zip(powers_of_rho.iter()) - .map(|(e, p)| *e * p) - .fold(G::Scalar::zero(), |acc, item| acc + item); - - let mut w_u_vec = Vec::new(); - - 
-    // eval_output = output(rand)
-    w_u_vec.push((
-      PolyEvalWitness {
-        p: poly_output.clone(),
-      },
-      PolyEvalInstance {
-        c: comm_output,
-        x: rand.clone(),
-        e: eval_output,
-      },
-    ));
-
-    // claimed_product = output(1, ..., 1, 0)
-    let x = {
-      let mut x = vec![G::Scalar::one(); rand.len()];
-      x[rand.len() - 1] = G::Scalar::zero();
-      x
-    };
-    w_u_vec.push((
-      PolyEvalWitness {
-        p: poly_output.clone(),
-      },
-      PolyEvalInstance {
-        c: comm_output,
-        x,
-        e: product,
-      },
-    ));
-
-    // eval_output2 = output(rand_ext[1..])
-    w_u_vec.push((
-      PolyEvalWitness { p: poly_output },
-      PolyEvalInstance {
-        c: comm_output,
-        x: rand_ext[1..].to_vec(),
-        e: eval_output2,
-      },
-    ));
-
-    let prod_arg = Self {
-      comm_output_vec,
-      sc_proof,
-
-      // claimed evaluations at rand
-      eval_left_vec,
-      eval_right_vec,
-      eval_output_vec,
-
-      // claimed evaluations at rand_ext[1..]
-      eval_input_vec: eval_input_vec.clone(),
-      eval_output2_vec,
-    };
-
-    Ok((
-      prod_arg,
-      prod_vec,
-      rand_ext[1..].to_vec(),
-      eval_input_vec,
-      w_u_vec,
-    ))
-  }
-
-  pub fn verify(
-    &self,
-    prod_vec: &[G::Scalar], // claimed products
-    len: usize,
-    transcript: &mut G::TE,
-  ) -> Result<(Vec<G::Scalar>, Vec<G::Scalar>, Vec<PolyEvalInstance<G>>), NovaError> {
-    // absorb the provided commitments and the claimed products
-    transcript.absorb(b"o", &self.comm_output_vec.as_slice());
-    transcript.absorb(b"r", &prod_vec.to_vec().as_slice());
-
-    let num_rounds = len.log_2();
-    let num_claims = prod_vec.len();
-
-    // produce a fresh set of coeffs and a joint claim
-    let coeff_vec = {
-      let s = transcript.squeeze(b"r")?;
-      let mut s_vec = vec![s];
-      for i in 1..num_claims {
-        s_vec.push(s_vec[i - 1] * s);
-      }
-      s_vec
-    };
-
-    // generate randomness for the eq polynomial
-    let rand_eq = (0..num_rounds)
-      .map(|_i| transcript.squeeze(b"e"))
-      .collect::<Result<Vec<G::Scalar>, NovaError>>()?;
-
-    let (final_claim, rand) = self.sc_proof.verify(
-      G::Scalar::zero(), // claim
-      num_rounds,
-      3, // degree bound
-      transcript,
-    )?;
-
-    // verify the final claim along with output[output.len() - 2] = claim
-    let eq = EqPolynomial::new(rand_eq).evaluate(&rand);
-    let final_claim_expected = (0..num_claims)
-      .map(|i| {
-        coeff_vec[i]
-          * eq
-          * (self.eval_left_vec[i] * self.eval_right_vec[i] - self.eval_output_vec[i])
-      })
-      .fold(G::Scalar::zero(), |acc, item| acc + item);
-
-    if final_claim != final_claim_expected {
-      return Err(NovaError::InvalidSumcheckProof);
-    }
-
-    transcript.absorb(b"l", &self.eval_left_vec.as_slice());
-    transcript.absorb(b"r", &self.eval_right_vec.as_slice());
-    transcript.absorb(b"o", &self.eval_output_vec.as_slice());
-
-    let c = transcript.squeeze(b"c")?;
-    let eval_vec = self
-      .eval_left_vec
-      .iter()
-      .zip(self.eval_right_vec.iter())
-      .map(|(l, r)| (G::Scalar::one() - c) * l + c * r)
-      .collect::<Vec<G::Scalar>>();
-
-    // eval is the claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c)
-    let rand_ext = {
-      let mut r = rand.clone();
-      r.extend(&[c]);
-      r
-    };
-
-    for (i, eval) in eval_vec.iter().enumerate() {
-      if *eval
-        != (G::Scalar::one() - rand_ext[0]) * self.eval_input_vec[i]
-          + rand_ext[0] * self.eval_output2_vec[i]
-      {
-        return Err(NovaError::InvalidSumcheckProof);
-      }
-    }
-
-    transcript.absorb(b"i", &self.eval_input_vec.as_slice());
-    transcript.absorb(b"o", &self.eval_output2_vec.as_slice());
-
-    // squeeze a challenge to combine multiple claims into one
-    let powers_of_rho = {
-      let s = transcript.squeeze(b"r")?;
-      let mut s_vec = vec![s];
-      for i in 1..num_claims {
-        s_vec.push(s_vec[i - 1] * s);
-      }
-      s_vec
-    };
-
-    // take weighted sums of the claimed products, output evaluations, and output commitments
-    let product = prod_vec
-      .iter()
-      .zip(powers_of_rho.iter())
-      .map(|(e, p)| *e * p)
-      .fold(G::Scalar::zero(), |acc, item| acc + item);
-
-    let eval_output = self
-      .eval_output_vec
-      .iter()
-      .zip(powers_of_rho.iter())
-      .map(|(e, p)| *e * p)
-      .fold(G::Scalar::zero(), |acc, item| acc + item);
-
-    let comm_output = self
-      .comm_output_vec
-      .iter()
-      .zip(powers_of_rho.iter())
-      .map(|(c, r_i)| *c * *r_i)
-      .fold(Commitment::<G>::default(), |acc, item| acc + item);
-
-    let eval_output2 = self
-      .eval_output2_vec
-      .iter()
-      .zip(powers_of_rho.iter())
-      .map(|(e, p)| *e * p)
-      .fold(G::Scalar::zero(), |acc, item| acc + item);
-
-    let mut u_vec = Vec::new();
-
-    // eval_output = output(rand)
-    u_vec.push(PolyEvalInstance {
-      c: comm_output,
-      x: rand.clone(),
-      e: eval_output,
-    });
-
-    // claimed_product = output(1, ..., 1, 0)
-    let x = {
-      let mut x = vec![G::Scalar::one(); rand.len()];
-      x[rand.len() - 1] = G::Scalar::zero();
-      x
-    };
-    u_vec.push(PolyEvalInstance {
-      c: comm_output,
-      x,
-      e: product,
-    });
-
-    // eval_output2 = output(rand_ext[1..])
-    u_vec.push(PolyEvalInstance {
-      c: comm_output,
-      x: rand_ext[1..].to_vec(),
-      e: eval_output2,
-    });
-
-    // input-related claims are checked by the caller
-    Ok((self.eval_input_vec.clone(), rand_ext[1..].to_vec(), u_vec))
-  }
-}
diff --git a/src/spartan/sumcheck.rs b/src/spartan/sumcheck.rs
index 83bbcdc..a80e56b 100644
--- a/src/spartan/sumcheck.rs
+++ b/src/spartan/sumcheck.rs
@@ -197,94 +197,6 @@ impl<G: Group> SumcheckProof<G> {
     Ok((SumcheckProof::new(quad_polys), r, claims_prod))
   }
 
-  pub fn prove_cubic<F>(
-    claim: &G::Scalar,
-    num_rounds: usize,
-    poly_A: &mut MultilinearPolynomial<G::Scalar>,
-    poly_B: &mut MultilinearPolynomial<G::Scalar>,
-    poly_C: &mut MultilinearPolynomial<G::Scalar>,
-    comb_func: F,
-    transcript: &mut G::TE,
-  ) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError>
-  where
-    F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync,
-  {
-    let mut r: Vec<G::Scalar> = Vec::new();
-    let mut polys: Vec<CompressedUniPoly<G>> = Vec::new();
-    let mut claim_per_round = *claim;
-
-    for _ in 0..num_rounds {
-      let poly = {
-        let len = poly_A.len() / 2;
-
-        // Make an iterator returning the contributions to the evaluations
-        let (eval_point_0, eval_point_2, eval_point_3) = (0..len)
-          .into_par_iter()
-          .map(|i| {
-            // eval 0: bound_func is A(low)
-            let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
-
-            // eval 2: bound_func is -A(low) + 2*A(high)
-            let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
-            let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
-            let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
-            let eval_point_2 = comb_func(
-              &poly_A_bound_point,
-              &poly_B_bound_point,
-              &poly_C_bound_point,
-            );
-
-            // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
-            let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
-            let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
-            let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
-            let eval_point_3 = comb_func(
-              &poly_A_bound_point,
-              &poly_B_bound_point,
-              &poly_C_bound_point,
-            );
-            (eval_point_0, eval_point_2, eval_point_3)
-          })
-          .reduce(
-            || (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()),
-            |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2),
-          );
-
-        let evals = vec![
-          eval_point_0,
-          claim_per_round - eval_point_0,
-          eval_point_2,
-          eval_point_3,
-        ];
-        UniPoly::from_evals(&evals)
-      };
-
-      // append the prover's message to the transcript
-      transcript.absorb(b"p", &poly);
-
-      // derive the verifier's challenge for the next round
-      let r_i = transcript.squeeze(b"c")?;
-      r.push(r_i);
-      polys.push(poly.compress());
-
-      // Set up next round
-      claim_per_round = poly.evaluate(&r_i);
-
-      // bound all tables to the verifier's challenge
-      poly_A.bound_poly_var_top(&r_i);
-      poly_B.bound_poly_var_top(&r_i);
-      poly_C.bound_poly_var_top(&r_i);
-    }
-
-    Ok((
-      SumcheckProof {
-        compressed_polys: polys,
-      },
-      r,
-      vec![poly_A[0], poly_B[0], poly_C[0]],
-    ))
-  }
-
   pub fn prove_cubic_with_additive_term<F>(
     claim: &G::Scalar,
     num_rounds: usize,