mirror of https://github.com/arnaucube/Nova.git (synced 2026-01-11 16:41:28 +01:00)
Spartan variant with an IPA-based polynomial commitment scheme for compressing IVC proofs (#80)
* cleanup code
* compiles
* additional plumbing
* add padding
* Add missing file
* integrate
* add a separate test
* cleanup
* cleanup
* add checks for outer sum-check
* sum-checks pass
* sum-checks pass
* sum-checks pass
* Add polycommit checks to the end
* switch to pasta_msm
* clippy
* remove int_log
* switch to pasta_curves
* clippy
* clippy
* add a special case for bases.len() = 1
* use naive MSM to avoid SIGFE error for smaller MSMs
* add rayon parallelism to naive MSM
* update comment since we already implement it
* address clippy
* cleanup map and reduce code
* add parallelism to final SNARK creation and verification
* add par
* add par
* add par
* add par
* store padded shapes in the parameters
* Address clippy
* pass padded shape in params
* pass padded shape in params
* cargo fmt
* add par
* add par
* Add par
* cleanup with a reorg
* factor out spartan-based snark into a separate module
* create traits for RelaxedR1CSSNARK
* make CompressedSNARK parameterized by a SNARK satisfying our new trait
* fix benches
* cleanup code
* remove unused
* move code to Spartan-based SNARK
* make unused function private
* rename IPA types for clarity
* cleanup
* return error types; rename r_j to r_i
* fix duplicate code
@@ -1,7 +1,6 @@
//! There are two Verification Circuits. The primary and the secondary.
//! Each of them is over a Pasta curve but
//! only the primary executes the next step of the computation.
//! TODO: The base case is different for the primary and the secondary.
//! We have two running instances. Each circuit takes as input 2 hashes: one for each
//! of the running instances. Each of these hashes is
//! H(params = H(shape, gens), i, z0, zi, U). Each circuit folds the last invocation of
@@ -267,7 +266,7 @@ where
|
||||
|
||||
// Compute variable indicating if this is the base case
|
||||
let zero = alloc_zero(cs.namespace(|| "zero"))?;
|
||||
let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; //TODO: maybe optimize this?
|
||||
let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?;
|
||||
|
||||
// Synthesize the circuit for the base case and get the new running instance
|
||||
let Unew_base = self.synthesize_base_case(cs.namespace(|| "base case"), u.clone())?;
|
||||
@@ -362,6 +361,7 @@ mod tests {
|
||||
use ff::PrimeField;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct TestCircuit<F: PrimeField> {
|
||||
_p: PhantomData<F>,
|
||||
}
|
||||
@@ -430,15 +430,8 @@ mod tests {
|
||||
// Execute the base case for the primary
|
||||
let zero1 = <<G2 as Group>::Base as Field>::zero();
|
||||
let mut cs1: SatisfyingAssignment<G1> = SatisfyingAssignment::new();
|
||||
let inputs1: NIFSVerifierCircuitInputs<G2> = NIFSVerifierCircuitInputs::new(
|
||||
shape2.get_digest(),
|
||||
zero1,
|
||||
zero1, // TODO: Provide real input for z0
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
let inputs1: NIFSVerifierCircuitInputs<G2> =
|
||||
NIFSVerifierCircuitInputs::new(shape2.get_digest(), zero1, zero1, None, None, None, None);
|
||||
let circuit1: NIFSVerifierCircuit<G2, TestCircuit<<G2 as Group>::Base>> =
|
||||
NIFSVerifierCircuit::new(
|
||||
params1,
|
||||
|
||||
@@ -9,8 +9,9 @@ use core::{
|
||||
};
|
||||
use ff::Field;
|
||||
use merlin::Transcript;
|
||||
use rayon::prelude::*;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CommitGens<G: Group> {
|
||||
gens: Vec<G::PreprocessedGroupElement>,
|
||||
_p: PhantomData<G>,
|
||||
@@ -28,13 +29,101 @@ pub struct CompressedCommitment<C: CompressedGroup> {
|
||||
|
||||
impl<G: Group> CommitGens<G> {
|
||||
pub fn new(label: &'static [u8], n: usize) -> Self {
|
||||
let gens = G::from_label(label, n);
|
||||
CommitGens {
|
||||
gens: G::from_label(label, n.next_power_of_two()),
|
||||
_p: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.gens.len()
|
||||
}
|
||||
|
||||
pub fn split_at(&self, n: usize) -> (CommitGens<G>, CommitGens<G>) {
|
||||
(
|
||||
CommitGens {
|
||||
gens: self.gens[0..n].to_vec(),
|
||||
_p: Default::default(),
|
||||
},
|
||||
CommitGens {
|
||||
gens: self.gens[n..].to_vec(),
|
||||
_p: Default::default(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn combine(&self, other: &CommitGens<G>) -> CommitGens<G> {
|
||||
let gens = {
|
||||
let mut c = self.gens.clone();
|
||||
c.extend(other.gens.clone());
|
||||
c
|
||||
};
|
||||
CommitGens {
|
||||
gens,
|
||||
_p: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
// combines the left and right halves of `self` using `w1` and `w2` as the weights
|
||||
pub fn fold(&self, w1: &G::Scalar, w2: &G::Scalar) -> CommitGens<G> {
|
||||
let w = vec![*w1, *w2];
|
||||
let (L, R) = self.split_at(self.len() / 2);
|
||||
|
||||
let gens = (0..self.len() / 2)
|
||||
.into_par_iter()
|
||||
.map(|i| {
|
||||
let gens = CommitGens::<G> {
|
||||
gens: [L.gens[i].clone(), R.gens[i].clone()].to_vec(),
|
||||
_p: Default::default(),
|
||||
};
|
||||
w.commit(&gens).comm.preprocessed()
|
||||
})
|
||||
.collect();
|
||||
|
||||
CommitGens {
|
||||
gens,
|
||||
_p: PhantomData::default(),
|
||||
_p: Default::default(),
|
||||
}
|
||||
}
|
||||
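// A minimal restatement of what `fold` computes (added for exposition; not part of the diff):
// with n = self.len(), the folded generators are
//     gens'_i = w1 * gens_i + w2 * gens_{i + n/2},   for i = 0, ..., n/2 - 1,
// realized above by committing the length-2 vector (w1, w2) against the pair (L_i, R_i).
// The IPA prover below uses this as `gens.fold(&r_inverse, &r)`.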
|
||||
/// Scales each element in `self` by `r`
|
||||
pub fn scale(&self, r: &G::Scalar) -> Self {
|
||||
let gens_scaled = self
|
||||
.gens
|
||||
.clone()
|
||||
.into_par_iter()
|
||||
.map(|g| {
|
||||
let gens = CommitGens::<G> {
|
||||
gens: vec![g],
|
||||
_p: Default::default(),
|
||||
};
|
||||
[*r].commit(&gens).comm.preprocessed()
|
||||
})
|
||||
.collect();
|
||||
|
||||
CommitGens {
|
||||
gens: gens_scaled,
|
||||
_p: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// reinterprets a vector of commitments as a set of generators
|
||||
pub fn reinterpret_commitments_as_gens(
|
||||
c: &[CompressedCommitment<G::CompressedGroupElement>],
|
||||
) -> Result<Self, NovaError> {
|
||||
let d = (0..c.len())
|
||||
.into_par_iter()
|
||||
.map(|i| c[i].decompress())
|
||||
.collect::<Result<Vec<Commitment<G>>, NovaError>>()?;
|
||||
let gens = (0..d.len())
|
||||
.into_par_iter()
|
||||
.map(|i| d[i].comm.preprocessed())
|
||||
.collect();
|
||||
Ok(CommitGens {
|
||||
gens,
|
||||
_p: Default::default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<G: Group> Commitment<G> {
|
||||
|
||||
@@ -20,4 +20,8 @@ pub enum NovaError {
|
||||
ProofVerifyError,
|
||||
/// returned if the provided number of steps is zero
|
||||
InvalidNumSteps,
|
||||
/// returned when an invalid inner product argument is provided
|
||||
InvalidIPA,
|
||||
/// returned when an invalid sum-check proof is provided
|
||||
InvalidSumcheckProof,
|
||||
}
262  src/lib.rs
@@ -3,16 +3,21 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
pub mod bellperson;
|
||||
// private modules
|
||||
mod circuit;
|
||||
mod commitments;
|
||||
mod constants;
|
||||
mod nifs;
|
||||
mod poseidon;
|
||||
mod r1cs;
|
||||
|
||||
// public modules
|
||||
pub mod bellperson;
|
||||
pub mod errors;
|
||||
pub mod gadgets;
|
||||
pub mod nifs;
|
||||
pub mod pasta;
|
||||
mod poseidon;
|
||||
pub mod r1cs;
|
||||
pub mod snark;
|
||||
pub mod spartan_with_ipa_pc;
|
||||
pub mod traits;
|
||||
|
||||
use crate::bellperson::{
|
||||
@@ -32,6 +37,7 @@ use poseidon::ROConstantsCircuit; // TODO: make this a trait so we can use it wi
|
||||
use r1cs::{
|
||||
R1CSGens, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness,
|
||||
};
|
||||
use snark::RelaxedR1CSSNARKTrait;
|
||||
use traits::{AbsorbInROTrait, Group, HashFuncConstantsTrait, HashFuncTrait, StepCircuit};
|
||||
|
||||
type ROConstants<G> =
|
||||
@@ -49,10 +55,12 @@ where
|
||||
ro_consts_circuit_primary: ROConstantsCircuit<<G2 as Group>::Base>,
|
||||
r1cs_gens_primary: R1CSGens<G1>,
|
||||
r1cs_shape_primary: R1CSShape<G1>,
|
||||
r1cs_shape_padded_primary: R1CSShape<G1>,
|
||||
ro_consts_secondary: ROConstants<G2>,
|
||||
ro_consts_circuit_secondary: ROConstantsCircuit<<G1 as Group>::Base>,
|
||||
r1cs_gens_secondary: R1CSGens<G2>,
|
||||
r1cs_shape_secondary: R1CSShape<G2>,
|
||||
r1cs_shape_padded_secondary: R1CSShape<G2>,
|
||||
c_primary: C1,
|
||||
c_secondary: C2,
|
||||
params_primary: NIFSVerifierCircuitParams,
|
||||
@@ -89,6 +97,7 @@ where
|
||||
let mut cs: ShapeCS<G1> = ShapeCS::new();
|
||||
let _ = circuit_primary.synthesize(&mut cs);
|
||||
let (r1cs_shape_primary, r1cs_gens_primary) = (cs.r1cs_shape(), cs.r1cs_gens());
|
||||
let r1cs_shape_padded_primary = r1cs_shape_primary.pad();
|
||||
|
||||
// Initialize gens for the secondary
|
||||
let circuit_secondary: NIFSVerifierCircuit<G1, C2> = NIFSVerifierCircuit::new(
|
||||
@@ -100,16 +109,19 @@ where
|
||||
let mut cs: ShapeCS<G2> = ShapeCS::new();
|
||||
let _ = circuit_secondary.synthesize(&mut cs);
|
||||
let (r1cs_shape_secondary, r1cs_gens_secondary) = (cs.r1cs_shape(), cs.r1cs_gens());
|
||||
let r1cs_shape_padded_secondary = r1cs_shape_secondary.pad();
|
||||
|
||||
Self {
|
||||
ro_consts_primary,
|
||||
ro_consts_circuit_primary,
|
||||
r1cs_gens_primary,
|
||||
r1cs_shape_primary,
|
||||
r1cs_shape_padded_primary,
|
||||
ro_consts_secondary,
|
||||
ro_consts_circuit_secondary,
|
||||
r1cs_gens_secondary,
|
||||
r1cs_shape_secondary,
|
||||
r1cs_shape_padded_secondary,
|
||||
c_primary,
|
||||
c_secondary,
|
||||
params_primary,
|
||||
@@ -366,50 +378,74 @@ where
|
||||
}
|
||||
|
||||
// check the satisfiability of the provided instances
|
||||
pp.r1cs_shape_primary.is_sat_relaxed(
|
||||
&pp.r1cs_gens_primary,
|
||||
&self.r_U_primary,
|
||||
&self.r_W_primary,
|
||||
)?;
|
||||
let ((res_r_primary, res_l_primary), (res_r_secondary, res_l_secondary)) = rayon::join(
|
||||
|| {
|
||||
rayon::join(
|
||||
|| {
|
||||
pp.r1cs_shape_primary.is_sat_relaxed(
|
||||
&pp.r1cs_gens_primary,
|
||||
&self.r_U_primary,
|
||||
&self.r_W_primary,
|
||||
)
|
||||
},
|
||||
|| {
|
||||
pp.r1cs_shape_primary.is_sat(
|
||||
&pp.r1cs_gens_primary,
|
||||
&self.l_u_primary,
|
||||
&self.l_w_primary,
|
||||
)
|
||||
},
|
||||
)
|
||||
},
|
||||
|| {
|
||||
rayon::join(
|
||||
|| {
|
||||
pp.r1cs_shape_secondary.is_sat_relaxed(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&self.r_U_secondary,
|
||||
&self.r_W_secondary,
|
||||
)
|
||||
},
|
||||
|| {
|
||||
pp.r1cs_shape_secondary.is_sat(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&self.l_u_secondary,
|
||||
&self.l_w_secondary,
|
||||
)
|
||||
},
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
pp.r1cs_shape_primary
|
||||
.is_sat(&pp.r1cs_gens_primary, &self.l_u_primary, &self.l_w_primary)?;
|
||||
|
||||
pp.r1cs_shape_secondary.is_sat_relaxed(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&self.r_U_secondary,
|
||||
&self.r_W_secondary,
|
||||
)?;
|
||||
|
||||
pp.r1cs_shape_secondary.is_sat(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&self.l_u_secondary,
|
||||
&self.l_w_secondary,
|
||||
)?;
|
||||
// check the returned res objects
|
||||
res_r_primary?;
|
||||
res_l_primary?;
|
||||
res_r_secondary?;
|
||||
res_l_secondary?;
|
||||
|
||||
Ok((self.zn_primary, self.zn_secondary))
|
||||
}
|
||||
}
|
||||
|
||||
/// A SNARK that proves the knowledge of a valid `RecursiveSNARK`
|
||||
/// For now, it implements a constant factor compression.
|
||||
/// In the future, we will implement an exponential reduction in proof sizes
|
||||
pub struct CompressedSNARK<G1, G2, C1, C2>
|
||||
pub struct CompressedSNARK<G1, G2, C1, C2, S1, S2>
|
||||
where
|
||||
G1: Group<Base = <G2 as Group>::Scalar>,
|
||||
G2: Group<Base = <G1 as Group>::Scalar>,
|
||||
C1: StepCircuit<G1::Scalar> + Clone,
|
||||
C2: StepCircuit<G2::Scalar> + Clone,
|
||||
C1: StepCircuit<G1::Scalar>,
|
||||
C2: StepCircuit<G2::Scalar>,
|
||||
S1: RelaxedR1CSSNARKTrait<G1>,
|
||||
S2: RelaxedR1CSSNARKTrait<G2>,
|
||||
{
|
||||
r_U_primary: RelaxedR1CSInstance<G1>,
|
||||
l_u_primary: R1CSInstance<G1>,
|
||||
nifs_primary: NIFS<G1>,
|
||||
f_W_primary: RelaxedR1CSWitness<G1>,
|
||||
f_W_snark_primary: S1,
|
||||
|
||||
r_U_secondary: RelaxedR1CSInstance<G2>,
|
||||
l_u_secondary: R1CSInstance<G2>,
|
||||
nifs_secondary: NIFS<G2>,
|
||||
f_W_secondary: RelaxedR1CSWitness<G2>,
|
||||
f_W_snark_secondary: S2,
|
||||
|
||||
zn_primary: G1::Scalar,
|
||||
zn_secondary: G2::Scalar,
|
||||
@@ -418,50 +454,84 @@ where
|
||||
_p_c2: PhantomData<C2>,
|
||||
}
|
||||
|
||||
impl<G1, G2, C1, C2> CompressedSNARK<G1, G2, C1, C2>
|
||||
impl<G1, G2, C1, C2, S1, S2> CompressedSNARK<G1, G2, C1, C2, S1, S2>
|
||||
where
|
||||
G1: Group<Base = <G2 as Group>::Scalar>,
|
||||
G2: Group<Base = <G1 as Group>::Scalar>,
|
||||
C1: StepCircuit<G1::Scalar> + Clone,
|
||||
C2: StepCircuit<G2::Scalar> + Clone,
|
||||
C1: StepCircuit<G1::Scalar>,
|
||||
C2: StepCircuit<G2::Scalar>,
|
||||
S1: RelaxedR1CSSNARKTrait<G1>,
|
||||
S2: RelaxedR1CSSNARKTrait<G2>,
|
||||
{
|
||||
/// Create a new `CompressedSNARK`
|
||||
pub fn prove(
|
||||
pp: &PublicParams<G1, G2, C1, C2>,
|
||||
recursive_snark: &RecursiveSNARK<G1, G2, C1, C2>,
|
||||
) -> Result<Self, NovaError> {
|
||||
// fold the primary circuit's instance
|
||||
let (nifs_primary, (_f_U_primary, f_W_primary)) = NIFS::prove(
|
||||
&pp.r1cs_gens_primary,
|
||||
&pp.ro_consts_primary,
|
||||
&pp.r1cs_shape_primary,
|
||||
&recursive_snark.r_U_primary,
|
||||
&recursive_snark.r_W_primary,
|
||||
&recursive_snark.l_u_primary,
|
||||
&recursive_snark.l_w_primary,
|
||||
)?;
|
||||
let (res_primary, res_secondary) = rayon::join(
|
||||
// fold the primary circuit's instance
|
||||
|| {
|
||||
NIFS::prove(
|
||||
&pp.r1cs_gens_primary,
|
||||
&pp.ro_consts_primary,
|
||||
&pp.r1cs_shape_primary,
|
||||
&recursive_snark.r_U_primary,
|
||||
&recursive_snark.r_W_primary,
|
||||
&recursive_snark.l_u_primary,
|
||||
&recursive_snark.l_w_primary,
|
||||
)
|
||||
},
|
||||
|| {
|
||||
// fold the secondary circuit's instance
|
||||
NIFS::prove(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&pp.ro_consts_secondary,
|
||||
&pp.r1cs_shape_secondary,
|
||||
&recursive_snark.r_U_secondary,
|
||||
&recursive_snark.r_W_secondary,
|
||||
&recursive_snark.l_u_secondary,
|
||||
&recursive_snark.l_w_secondary,
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
// fold the secondary circuit's instance
|
||||
let (nifs_secondary, (_f_U_secondary, f_W_secondary)) = NIFS::prove(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&pp.ro_consts_secondary,
|
||||
&pp.r1cs_shape_secondary,
|
||||
&recursive_snark.r_U_secondary,
|
||||
&recursive_snark.r_W_secondary,
|
||||
&recursive_snark.l_u_secondary,
|
||||
&recursive_snark.l_w_secondary,
|
||||
)?;
|
||||
let (nifs_primary, (f_U_primary, f_W_primary)) = res_primary?;
|
||||
let (nifs_secondary, (f_U_secondary, f_W_secondary)) = res_secondary?;
|
||||
|
||||
// produce a prover key for the SNARK
|
||||
let (pk_primary, pk_secondary) = rayon::join(
|
||||
|| S1::prover_key(&pp.r1cs_gens_primary, &pp.r1cs_shape_padded_primary),
|
||||
|| S2::prover_key(&pp.r1cs_gens_secondary, &pp.r1cs_shape_padded_secondary),
|
||||
);
|
||||
|
||||
// create SNARKs proving the knowledge of f_W_primary and f_W_secondary
|
||||
let (f_W_snark_primary, f_W_snark_secondary) = rayon::join(
|
||||
|| {
|
||||
S1::prove(
|
||||
&pk_primary,
|
||||
&f_U_primary,
|
||||
&f_W_primary.pad(&pp.r1cs_shape_padded_primary), // pad the witness since shape was padded
|
||||
)
|
||||
},
|
||||
|| {
|
||||
S2::prove(
|
||||
&pk_secondary,
|
||||
&f_U_secondary,
|
||||
&f_W_secondary.pad(&pp.r1cs_shape_padded_secondary), // pad the witness since the shape was padded
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
r_U_primary: recursive_snark.r_U_primary.clone(),
|
||||
l_u_primary: recursive_snark.l_u_primary.clone(),
|
||||
nifs_primary,
|
||||
f_W_primary,
|
||||
f_W_snark_primary: f_W_snark_primary?,
|
||||
|
||||
r_U_secondary: recursive_snark.r_U_secondary.clone(),
|
||||
l_u_secondary: recursive_snark.l_u_secondary.clone(),
|
||||
nifs_secondary,
|
||||
f_W_secondary,
|
||||
f_W_snark_secondary: f_W_snark_secondary?,
|
||||
|
||||
zn_primary: recursive_snark.zn_primary,
|
||||
zn_secondary: recursive_snark.zn_secondary,
|
||||
@@ -532,15 +602,24 @@ where
|
||||
&self.l_u_secondary,
|
||||
)?;
|
||||
|
||||
// check the satisfiability of the folded instances using the purported folded witnesses
|
||||
pp.r1cs_shape_primary
|
||||
.is_sat_relaxed(&pp.r1cs_gens_primary, &f_U_primary, &self.f_W_primary)?;
|
||||
// produce a verifier key for the SNARK
|
||||
let (vk_primary, vk_secondary) = rayon::join(
|
||||
|| S1::verifier_key(&pp.r1cs_gens_primary, &pp.r1cs_shape_padded_primary),
|
||||
|| S2::verifier_key(&pp.r1cs_gens_secondary, &pp.r1cs_shape_padded_secondary),
|
||||
);
|
||||
|
||||
pp.r1cs_shape_secondary.is_sat_relaxed(
|
||||
&pp.r1cs_gens_secondary,
|
||||
&f_U_secondary,
|
||||
&self.f_W_secondary,
|
||||
)?;
|
||||
// check the satisfiability of the folded instances using SNARKs proving the knowledge of their satisfying witnesses
|
||||
let (res_primary, res_secondary) = rayon::join(
|
||||
|| self.f_W_snark_primary.verify(&vk_primary, &f_U_primary),
|
||||
|| {
|
||||
self
|
||||
.f_W_snark_secondary
|
||||
.verify(&vk_secondary, &f_U_secondary)
|
||||
},
|
||||
);
|
||||
|
||||
res_primary?;
|
||||
res_secondary?;
|
||||
|
||||
Ok((self.zn_primary, self.zn_secondary))
|
||||
}
|
||||
@@ -551,6 +630,8 @@ mod tests {
|
||||
use super::*;
|
||||
type G1 = pasta_curves::pallas::Point;
|
||||
type G2 = pasta_curves::vesta::Point;
|
||||
type S1 = spartan_with_ipa_pc::RelaxedR1CSSNARK<G1>;
|
||||
type S2 = spartan_with_ipa_pc::RelaxedR1CSSNARK<G2>;
|
||||
use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};
|
||||
use ff::PrimeField;
|
||||
use std::marker::PhantomData;
|
||||
@@ -710,9 +791,62 @@ mod tests {
|
||||
}
|
||||
assert_eq!(zn_secondary, zn_secondary_direct);
|
||||
assert_eq!(zn_secondary, <G2 as Group>::Scalar::from(2460515u64));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ivc_nontrivial_with_compression() {
|
||||
// produce public parameters
|
||||
let pp = PublicParams::<
|
||||
G1,
|
||||
G2,
|
||||
TrivialTestCircuit<<G1 as Group>::Scalar>,
|
||||
CubicCircuit<<G2 as Group>::Scalar>,
|
||||
>::setup(
|
||||
TrivialTestCircuit {
|
||||
_p: Default::default(),
|
||||
},
|
||||
CubicCircuit {
|
||||
_p: Default::default(),
|
||||
},
|
||||
);
|
||||
|
||||
let num_steps = 3;
|
||||
|
||||
// produce a recursive SNARK
|
||||
let res = RecursiveSNARK::prove(
|
||||
&pp,
|
||||
num_steps,
|
||||
<G1 as Group>::Scalar::one(),
|
||||
<G2 as Group>::Scalar::zero(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let recursive_snark = res.unwrap();
|
||||
|
||||
// verify the recursive SNARK
|
||||
let res = recursive_snark.verify(
|
||||
&pp,
|
||||
num_steps,
|
||||
<G1 as Group>::Scalar::one(),
|
||||
<G2 as Group>::Scalar::zero(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
|
||||
let (zn_primary, zn_secondary) = res.unwrap();
|
||||
|
||||
// sanity: check the claimed output with a direct computation of the same
|
||||
assert_eq!(zn_primary, <G1 as Group>::Scalar::one());
|
||||
let mut zn_secondary_direct = <G2 as Group>::Scalar::zero();
|
||||
for _i in 0..num_steps {
|
||||
zn_secondary_direct = CubicCircuit {
|
||||
_p: Default::default(),
|
||||
}
|
||||
.compute(&zn_secondary_direct);
|
||||
}
|
||||
assert_eq!(zn_secondary, zn_secondary_direct);
|
||||
assert_eq!(zn_secondary, <G2 as Group>::Scalar::from(2460515u64));
|
||||
|
||||
// produce a compressed SNARK
|
||||
let res = CompressedSNARK::prove(&pp, &recursive_snark);
|
||||
let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &recursive_snark);
|
||||
assert!(res.is_ok());
|
||||
let compressed_snark = res.unwrap();
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ use super::traits::{AbsorbInROTrait, Group, HashFuncTrait};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
/// A SNARK that holds the proof of a step of an incremental computation
|
||||
#[allow(clippy::upper_case_acronyms)]
|
||||
pub struct NIFS<G: Group> {
|
||||
pub(crate) comm_T: CompressedCommitment<G::CompressedGroupElement>,
|
||||
_p: PhantomData<G>,
33  src/pasta.rs
@@ -10,14 +10,15 @@ use num_bigint::BigInt;
|
||||
use num_traits::Num;
|
||||
use pasta_curves::{
|
||||
self,
|
||||
arithmetic::{CurveAffine, CurveExt},
|
||||
arithmetic::{CurveAffine, CurveExt, Group as OtherGroup},
|
||||
group::{Curve, GroupEncoding},
|
||||
pallas, vesta, Ep, Eq,
|
||||
};
|
||||
use rand::SeedableRng;
|
||||
use rand_chacha::ChaCha20Rng;
|
||||
use rayon::prelude::*;
|
||||
use sha3::Shake256;
|
||||
use std::io::Read;
|
||||
use std::{io::Read, ops::Mul};
|
||||
|
||||
//////////////////////////////////////Pallas///////////////////////////////////////////////
|
||||
|
||||
@@ -45,7 +46,19 @@ impl Group for pallas::Point {
|
||||
scalars: &[Self::Scalar],
|
||||
bases: &[Self::PreprocessedGroupElement],
|
||||
) -> Self {
|
||||
pasta_msm::pallas(bases, scalars)
|
||||
if scalars.len() >= 128 {
|
||||
pasta_msm::pallas(bases, scalars)
|
||||
} else {
|
||||
scalars
|
||||
.par_iter()
|
||||
.zip(bases)
|
||||
.map(|(scalar, base)| base.mul(scalar))
|
||||
.reduce(Ep::group_zero, |x, y| x + y)
|
||||
}
|
||||
}
|
||||
|
||||
fn preprocessed(&self) -> Self::PreprocessedGroupElement {
|
||||
self.to_affine()
|
||||
}
|
||||
|
||||
fn compress(&self) -> Self::CompressedGroupElement {
|
||||
@@ -130,13 +143,25 @@ impl Group for vesta::Point {
|
||||
scalars: &[Self::Scalar],
|
||||
bases: &[Self::PreprocessedGroupElement],
|
||||
) -> Self {
|
||||
pasta_msm::vesta(bases, scalars)
|
||||
if scalars.len() >= 128 {
|
||||
pasta_msm::vesta(bases, scalars)
|
||||
} else {
|
||||
scalars
|
||||
.par_iter()
|
||||
.zip(bases)
|
||||
.map(|(scalar, base)| base.mul(scalar))
|
||||
.reduce(Eq::group_zero, |x, y| x + y)
|
||||
}
|
||||
}
|
||||
|
||||
fn compress(&self) -> Self::CompressedGroupElement {
|
||||
VestaCompressedElementWrapper::new(self.to_bytes())
|
||||
}
|
||||
|
||||
fn preprocessed(&self) -> Self::PreprocessedGroupElement {
|
||||
self.to_affine()
|
||||
}
|
||||
|
||||
fn from_label(label: &'static [u8], n: usize) -> Vec<Self::PreprocessedGroupElement> {
|
||||
let mut shake = Shake256::default();
|
||||
shake.input(label);
160  src/r1cs.rs
@@ -8,6 +8,7 @@ use super::{
|
||||
traits::{AbsorbInROTrait, AppendToTranscriptTrait, Group, HashFuncTrait},
|
||||
};
|
||||
use bellperson_nonnative::{mp::bignat::nat_to_limbs, util::convert::f_to_nat};
|
||||
use core::cmp::max;
|
||||
use ff::{Field, PrimeField};
|
||||
use flate2::{write::ZlibEncoder, Compression};
|
||||
use itertools::concat;
|
||||
@@ -17,20 +18,20 @@ use serde::{Deserialize, Serialize};
|
||||
use sha3::{Digest, Sha3_256};
|
||||
|
||||
/// Public parameters for a given R1CS
|
||||
#[derive(Clone)]
|
||||
pub struct R1CSGens<G: Group> {
|
||||
pub(crate) gens_W: CommitGens<G>, // TODO: avoid pub(crate)
|
||||
pub(crate) gens_E: CommitGens<G>,
|
||||
pub(crate) gens: CommitGens<G>,
|
||||
}
|
||||
|
||||
/// A type that holds the shape of the R1CS matrices
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct R1CSShape<G: Group> {
|
||||
num_cons: usize,
|
||||
num_vars: usize,
|
||||
num_io: usize,
|
||||
A: Vec<(usize, usize, G::Scalar)>,
|
||||
B: Vec<(usize, usize, G::Scalar)>,
|
||||
C: Vec<(usize, usize, G::Scalar)>,
|
||||
pub(crate) num_cons: usize,
|
||||
pub(crate) num_vars: usize,
|
||||
pub(crate) num_io: usize,
|
||||
pub(crate) A: Vec<(usize, usize, G::Scalar)>,
|
||||
pub(crate) B: Vec<(usize, usize, G::Scalar)>,
|
||||
pub(crate) C: Vec<(usize, usize, G::Scalar)>,
|
||||
digest: G::Scalar, // digest of the rest of R1CSShape
|
||||
}
|
||||
|
||||
@@ -50,8 +51,8 @@ pub struct R1CSInstance<G: Group> {
|
||||
/// A type that holds a witness for a given Relaxed R1CS instance
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct RelaxedR1CSWitness<G: Group> {
|
||||
W: Vec<G::Scalar>,
|
||||
E: Vec<G::Scalar>,
|
||||
pub(crate) W: Vec<G::Scalar>,
|
||||
pub(crate) E: Vec<G::Scalar>,
|
||||
}
|
||||
|
||||
/// A type that holds a Relaxed R1CS instance
|
||||
@@ -66,13 +67,9 @@ pub struct RelaxedR1CSInstance<G: Group> {
|
||||
impl<G: Group> R1CSGens<G> {
|
||||
/// Samples public parameters for the specified number of constraints and variables in an R1CS
|
||||
pub fn new(num_cons: usize, num_vars: usize) -> R1CSGens<G> {
|
||||
// generators to commit to witness vector `W`
|
||||
let gens_W = CommitGens::new(b"gens_W", num_vars);
|
||||
|
||||
// generators to commit to the error/slack vector `E`
|
||||
let gens_E = CommitGens::new(b"gens_E", num_cons);
|
||||
|
||||
R1CSGens { gens_E, gens_W }
|
||||
R1CSGens {
|
||||
gens: CommitGens::new(b"gens", max(num_vars, num_cons)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -137,7 +134,7 @@ impl<G: Group> R1CSShape<G> {
|
||||
Ok(shape)
|
||||
}
|
||||
|
||||
fn multiply_vec(
|
||||
pub fn multiply_vec(
|
||||
&self,
|
||||
z: &[G::Scalar],
|
||||
) -> Result<(Vec<G::Scalar>, Vec<G::Scalar>, Vec<G::Scalar>), NovaError> {
|
||||
@@ -161,9 +158,15 @@ impl<G: Group> R1CSShape<G> {
|
||||
})
|
||||
};
|
||||
|
||||
let Az = sparse_matrix_vec_product(&self.A, self.num_cons, z);
|
||||
let Bz = sparse_matrix_vec_product(&self.B, self.num_cons, z);
|
||||
let Cz = sparse_matrix_vec_product(&self.C, self.num_cons, z);
|
||||
let (Az, (Bz, Cz)) = rayon::join(
|
||||
|| sparse_matrix_vec_product(&self.A, self.num_cons, z),
|
||||
|| {
|
||||
rayon::join(
|
||||
|| sparse_matrix_vec_product(&self.B, self.num_cons, z),
|
||||
|| sparse_matrix_vec_product(&self.C, self.num_cons, z),
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
Ok((Az, Bz, Cz))
|
||||
}
|
||||
@@ -202,9 +205,7 @@ impl<G: Group> R1CSShape<G> {
|
||||
|
||||
// verify if comm_E and comm_W are commitments to E and W
|
||||
let res_comm: bool = {
|
||||
let comm_W = W.W.commit(&gens.gens_W);
|
||||
let comm_E = W.E.commit(&gens.gens_E);
|
||||
|
||||
let (comm_W, comm_E) = rayon::join(|| W.W.commit(&gens.gens), || W.E.commit(&gens.gens));
|
||||
U.comm_W == comm_W && U.comm_E == comm_E
|
||||
};
|
||||
|
||||
@@ -241,7 +242,7 @@ impl<G: Group> R1CSShape<G> {
|
||||
};
|
||||
|
||||
// verify if comm_W is a commitment to W
|
||||
let res_comm: bool = U.comm_W == W.W.commit(&gens.gens_W);
|
||||
let res_comm: bool = U.comm_W == W.W.commit(&gens.gens);
|
||||
|
||||
if res_eq && res_comm {
|
||||
Ok(())
|
||||
@@ -271,15 +272,21 @@ impl<G: Group> R1CSShape<G> {
|
||||
};
|
||||
|
||||
let AZ_1_circ_BZ_2 = (0..AZ_1.len())
|
||||
.into_par_iter()
|
||||
.map(|i| AZ_1[i] * BZ_2[i])
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
let AZ_2_circ_BZ_1 = (0..AZ_2.len())
|
||||
.into_par_iter()
|
||||
.map(|i| AZ_2[i] * BZ_1[i])
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
let u_1_cdot_CZ_2 = (0..CZ_2.len())
|
||||
.into_par_iter()
|
||||
.map(|i| U1.u * CZ_2[i])
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
let u_2_cdot_CZ_1 = (0..CZ_1.len()).map(|i| CZ_1[i]).collect::<Vec<G::Scalar>>();
|
||||
let u_2_cdot_CZ_1 = (0..CZ_1.len())
|
||||
.into_par_iter()
|
||||
.map(|i| CZ_1[i])
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
let T = AZ_1_circ_BZ_2
|
||||
.par_iter()
|
||||
@@ -289,7 +296,7 @@ impl<G: Group> R1CSShape<G> {
|
||||
.map(|(((a, b), c), d)| *a + *b - *c - *d)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
let comm_T = T.commit(&gens.gens_E);
|
||||
let comm_T = T.commit(&gens.gens);
|
||||
|
||||
Ok((T, comm_T))
|
||||
}
|
||||
@@ -312,15 +319,15 @@ impl<G: Group> R1CSShape<G> {
|
||||
num_vars,
|
||||
num_io,
|
||||
A: A
|
||||
.iter()
|
||||
.par_iter()
|
||||
.map(|(i, j, v)| (*i, *j, v.to_repr().as_ref().to_vec()))
|
||||
.collect(),
|
||||
B: B
|
||||
.iter()
|
||||
.par_iter()
|
||||
.map(|(i, j, v)| (*i, *j, v.to_repr().as_ref().to_vec()))
|
||||
.collect(),
|
||||
C: C
|
||||
.iter()
|
||||
.par_iter()
|
||||
.map(|(i, j, v)| (*i, *j, v.to_repr().as_ref().to_vec()))
|
||||
.collect(),
|
||||
};
|
||||
@@ -353,6 +360,78 @@ impl<G: Group> R1CSShape<G> {
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
/// Pads the R1CSShape so that the number of variables is a power of two
|
||||
/// Renumbers variables to accommodate padded variables
|
||||
pub fn pad(&self) -> Self {
|
||||
// check if the provided R1CSShape is already as required
|
||||
if self.num_vars.next_power_of_two() == self.num_vars
|
||||
&& self.num_cons.next_power_of_two() == self.num_cons
|
||||
{
|
||||
return self.clone();
|
||||
}
|
||||
|
||||
// check if the number of variables is already as expected; if so,
|
||||
// we simply set the number of constraints to the next power of two
|
||||
if self.num_vars.next_power_of_two() == self.num_vars {
|
||||
let digest = Self::compute_digest(
|
||||
self.num_cons.next_power_of_two(),
|
||||
self.num_vars,
|
||||
self.num_io,
|
||||
&self.A,
|
||||
&self.B,
|
||||
&self.C,
|
||||
);
|
||||
|
||||
return R1CSShape {
|
||||
num_cons: self.num_cons.next_power_of_two(),
|
||||
num_vars: self.num_vars,
|
||||
num_io: self.num_io,
|
||||
A: self.A.clone(),
|
||||
B: self.B.clone(),
|
||||
C: self.C.clone(),
|
||||
digest,
|
||||
};
|
||||
}
|
||||
|
||||
// otherwise, we need to pad the number of variables and renumber variable accesses
|
||||
let num_vars_padded = self.num_vars.next_power_of_two();
|
||||
let num_cons_padded = self.num_cons.next_power_of_two();
|
||||
let apply_pad = |M: &[(usize, usize, G::Scalar)]| -> Vec<(usize, usize, G::Scalar)> {
|
||||
M.par_iter()
|
||||
.map(|(r, c, v)| {
|
||||
if c >= &self.num_vars {
|
||||
(*r, c + num_vars_padded - self.num_vars, *v)
|
||||
} else {
|
||||
(*r, *c, *v)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let A_padded = apply_pad(&self.A);
|
||||
let B_padded = apply_pad(&self.B);
|
||||
let C_padded = apply_pad(&self.C);
|
||||
|
||||
let digest = Self::compute_digest(
|
||||
num_cons_padded,
|
||||
num_vars_padded,
|
||||
self.num_io,
|
||||
&A_padded,
|
||||
&B_padded,
|
||||
&C_padded,
|
||||
);
|
||||
|
||||
R1CSShape {
|
||||
num_cons: num_cons_padded,
|
||||
num_vars: num_vars_padded,
|
||||
num_io: self.num_io,
|
||||
A: A_padded,
|
||||
B: B_padded,
|
||||
C: C_padded,
|
||||
digest,
|
||||
}
|
||||
}
|
||||
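// Worked micro-example of the renumbering above (illustrative only; not part of the diff):
// with num_vars = 5 and num_cons = 6, padding yields num_vars_padded = 8 and
// num_cons_padded = 8. A matrix entry (r, c, v) with c = 6, i.e. a column in the
// (u, X) segment that follows the witness, is shifted to column c + 8 - 5 = 9,
// while entries with c < 5 (witness columns) are left unchanged.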
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -391,7 +470,7 @@ impl<G: Group> R1CSWitness<G> {
|
||||
|
||||
/// Commits to the witness using the supplied generators
|
||||
pub fn commit(&self, gens: &R1CSGens<G>) -> Commitment<G> {
|
||||
self.W.commit(&gens.gens_W)
|
||||
self.W.commit(&gens.gens)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -448,7 +527,7 @@ impl<G: Group> RelaxedR1CSWitness<G> {
|
||||
|
||||
/// Commits to the witness using the supplied generators
|
||||
pub fn commit(&self, gens: &R1CSGens<G>) -> (Commitment<G>, Commitment<G>) {
|
||||
(self.W.commit(&gens.gens_W), self.E.commit(&gens.gens_E))
|
||||
(self.W.commit(&gens.gens), self.E.commit(&gens.gens))
|
||||
}
|
||||
|
||||
/// Folds an incoming R1CSWitness into the current one
|
||||
@@ -477,6 +556,23 @@ impl<G: Group> RelaxedR1CSWitness<G> {
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
Ok(RelaxedR1CSWitness { W, E })
|
||||
}
|
||||
|
||||
/// Pads the provided witness to the correct length
|
||||
pub fn pad(&self, S: &R1CSShape<G>) -> RelaxedR1CSWitness<G> {
|
||||
let W = {
|
||||
let mut W = self.W.clone();
|
||||
W.extend(vec![G::Scalar::zero(); S.num_vars - W.len()]);
|
||||
W
|
||||
};
|
||||
|
||||
let E = {
|
||||
let mut E = self.E.clone();
|
||||
E.extend(vec![G::Scalar::zero(); S.num_cons - E.len()]);
|
||||
E
|
||||
};
|
||||
|
||||
Self { W, E }
|
||||
}
|
||||
}
|
||||
|
||||
impl<G: Group> RelaxedR1CSInstance<G> {
47  src/snark.rs  Normal file
@@ -0,0 +1,47 @@
|
||||
//! A collection of traits that define the behavior of a zkSNARK for RelaxedR1CS
|
||||
use super::{
|
||||
errors::NovaError,
|
||||
r1cs::{R1CSGens, R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
|
||||
traits::Group,
|
||||
};
|
||||
|
||||
/// A trait that defines the behavior of a zkSNARK's prover key
|
||||
pub trait ProverKeyTrait<G: Group>: Send + Sync {
|
||||
/// Produces a new prover's key
|
||||
fn new(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self;
|
||||
}
|
||||
|
||||
/// A trait that defines the behavior of a zkSNARK's verifier key
|
||||
pub trait VerifierKeyTrait<G: Group>: Send + Sync {
|
||||
/// Produces a new verifier's key
|
||||
fn new(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self;
|
||||
}
|
||||
|
||||
/// A trait that defines the behavior of a zkSNARK
|
||||
pub trait RelaxedR1CSSNARKTrait<G: Group>: Sized + Send + Sync {
|
||||
/// A type that represents the prover's key
|
||||
type ProverKey: ProverKeyTrait<G>;
|
||||
|
||||
/// A type that represents the verifier's key
|
||||
type VerifierKey: VerifierKeyTrait<G>;
|
||||
|
||||
/// Produces a prover key
|
||||
fn prover_key(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self::ProverKey {
|
||||
Self::ProverKey::new(gens, S)
|
||||
}
|
||||
|
||||
/// Produces a verifier key
|
||||
fn verifier_key(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self::VerifierKey {
|
||||
Self::VerifierKey::new(gens, S)
|
||||
}
|
||||
|
||||
/// Produces a new SNARK for a relaxed R1CS
|
||||
fn prove(
|
||||
pk: &Self::ProverKey,
|
||||
U: &RelaxedR1CSInstance<G>,
|
||||
W: &RelaxedR1CSWitness<G>,
|
||||
) -> Result<Self, NovaError>;
|
||||
|
||||
/// Verifies a SNARK for a relaxed R1CS
|
||||
fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<G>) -> Result<(), NovaError>;
|
||||
}
|
||||
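// For context (mirroring the test added further below in lib.rs; shown here for exposition,
// not new API): a CompressedSNARK is now instantiated with concrete implementations of this
// trait, e.g.
//     type S1 = spartan_with_ipa_pc::RelaxedR1CSSNARK<G1>;
//     type S2 = spartan_with_ipa_pc::RelaxedR1CSSNARK<G2>;
//     let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &recursive_snark);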
399  src/spartan_with_ipa_pc/ipa.rs  Normal file
@@ -0,0 +1,399 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
use crate::commitments::{CommitGens, CommitTrait, Commitment, CompressedCommitment};
|
||||
use crate::errors::NovaError;
|
||||
use crate::traits::{AppendToTranscriptTrait, ChallengeTrait, Group};
|
||||
use core::iter;
|
||||
use ff::Field;
|
||||
use merlin::Transcript;
|
||||
use rayon::prelude::*;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
pub fn inner_product<T>(a: &[T], b: &[T]) -> T
|
||||
where
|
||||
T: Field + Send + Sync,
|
||||
{
|
||||
assert_eq!(a.len(), b.len());
|
||||
(0..a.len())
|
||||
.into_par_iter()
|
||||
.map(|i| a[i] * b[i])
|
||||
.reduce(T::zero, |x, y| x + y)
|
||||
}
|
||||
|
||||
/// An inner product instance consists of a commitment to a vector `a`, another vector `b`,
|
||||
/// and the claim that c = <a, b>.
|
||||
pub struct InnerProductInstance<G: Group> {
|
||||
comm_a_vec: Commitment<G>,
|
||||
b_vec: Vec<G::Scalar>,
|
||||
c: G::Scalar,
|
||||
}
|
||||
|
||||
impl<G: Group> InnerProductInstance<G> {
|
||||
pub fn new(comm_a_vec: &Commitment<G>, b_vec: &[G::Scalar], c: &G::Scalar) -> Self {
|
||||
InnerProductInstance {
|
||||
comm_a_vec: *comm_a_vec,
|
||||
b_vec: b_vec.to_vec(),
|
||||
c: *c,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct InnerProductWitness<G: Group> {
|
||||
a_vec: Vec<G::Scalar>,
|
||||
}
|
||||
|
||||
impl<G: Group> InnerProductWitness<G> {
|
||||
pub fn new(a_vec: &[G::Scalar]) -> Self {
|
||||
InnerProductWitness {
|
||||
a_vec: a_vec.to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A non-interactive folding scheme (NIFS) for inner product relations
|
||||
pub struct NIFSForInnerProduct<G: Group> {
|
||||
cross_term: G::Scalar,
|
||||
}
|
||||
|
||||
impl<G: Group> NIFSForInnerProduct<G> {
|
||||
pub fn protocol_name() -> &'static [u8] {
|
||||
b"NIFSForInnerProduct"
|
||||
}
|
||||
|
||||
pub fn prove(
|
||||
U1: &InnerProductInstance<G>,
|
||||
W1: &InnerProductWitness<G>,
|
||||
U2: &InnerProductInstance<G>,
|
||||
W2: &InnerProductWitness<G>,
|
||||
transcript: &mut Transcript,
|
||||
) -> (Self, InnerProductInstance<G>, InnerProductWitness<G>) {
|
||||
transcript.append_message(b"protocol-name", Self::protocol_name());
|
||||
|
||||
// add the two commitments and two public vectors to the transcript
|
||||
U1.comm_a_vec
|
||||
.append_to_transcript(b"U1_comm_a_vec", transcript);
|
||||
U1.b_vec.append_to_transcript(b"U1_b_vec", transcript);
|
||||
U2.comm_a_vec
|
||||
.append_to_transcript(b"U2_comm_a_vec", transcript);
|
||||
U2.b_vec.append_to_transcript(b"U2_b_vec", transcript);
|
||||
|
||||
// compute the cross-term
|
||||
let cross_term = inner_product(&W1.a_vec, &U2.b_vec) + inner_product(&W2.a_vec, &U1.b_vec);
|
||||
|
||||
// add the cross-term to the transcript
|
||||
cross_term.append_to_transcript(b"cross_term", transcript);
|
||||
|
||||
// obtain a random challenge
|
||||
let r = G::Scalar::challenge(b"r", transcript);
|
||||
|
||||
// fold the vectors and their inner product
|
||||
let a_vec = W1
|
||||
.a_vec
|
||||
.par_iter()
|
||||
.zip(W2.a_vec.par_iter())
|
||||
.map(|(x1, x2)| *x1 + r * x2)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
let b_vec = U1
|
||||
.b_vec
|
||||
.par_iter()
|
||||
.zip(U2.b_vec.par_iter())
|
||||
.map(|(a1, a2)| *a1 + r * a2)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
let c = U1.c + r * r * U2.c + r * cross_term;
|
||||
let comm_a_vec = U1.comm_a_vec + U2.comm_a_vec * r;
|
||||
|
||||
let W = InnerProductWitness { a_vec };
|
||||
let U = InnerProductInstance {
|
||||
comm_a_vec,
|
||||
b_vec,
|
||||
c,
|
||||
};
|
||||
|
||||
(NIFSForInnerProduct { cross_term }, U, W)
|
||||
}
|
||||
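// Sanity check of the folding used above (standard bilinearity; added for exposition,
// not part of the diff):
//     <a1 + r*a2, b1 + r*b2> = <a1, b1> + r*(<a1, b2> + <a2, b1>) + r^2*<a2, b2>
//                            = c1 + r*cross_term + r^2*c2,
// which matches `let c = U1.c + r * r * U2.c + r * cross_term;`, and the commitment folds
// homomorphically as comm(a1) + r*comm(a2), matching `U1.comm_a_vec + U2.comm_a_vec * r`.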
|
||||
pub fn verify(
|
||||
&self,
|
||||
U1: &InnerProductInstance<G>,
|
||||
U2: &InnerProductInstance<G>,
|
||||
transcript: &mut Transcript,
|
||||
) -> InnerProductInstance<G> {
|
||||
transcript.append_message(b"protocol-name", Self::protocol_name());
|
||||
|
||||
// add the two commitments and two public vectors to the transcript
|
||||
U1.comm_a_vec
|
||||
.append_to_transcript(b"U1_comm_a_vec", transcript);
|
||||
U1.b_vec.append_to_transcript(b"U1_b_vec", transcript);
|
||||
U2.comm_a_vec
|
||||
.append_to_transcript(b"U2_comm_a_vec", transcript);
|
||||
U2.b_vec.append_to_transcript(b"U2_b_vec", transcript);
|
||||
|
||||
// add the cross-term to the transcript
|
||||
self
|
||||
.cross_term
|
||||
.append_to_transcript(b"cross_term", transcript);
|
||||
|
||||
// obtain a random challenge
|
||||
let r = G::Scalar::challenge(b"r", transcript);
|
||||
|
||||
// fold the vectors and their inner product
|
||||
let b_vec = U1
|
||||
.b_vec
|
||||
.par_iter()
|
||||
.zip(U2.b_vec.par_iter())
|
||||
.map(|(a1, a2)| *a1 + r * a2)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
let c = U1.c + r * r * U2.c + r * self.cross_term;
|
||||
let comm_a_vec = U1.comm_a_vec + U2.comm_a_vec * r;
|
||||
|
||||
InnerProductInstance {
|
||||
comm_a_vec,
|
||||
b_vec,
|
||||
c,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An inner product argument
|
||||
#[derive(Debug)]
|
||||
pub struct InnerProductArgument<G: Group> {
|
||||
L_vec: Vec<CompressedCommitment<G::CompressedGroupElement>>,
|
||||
R_vec: Vec<CompressedCommitment<G::CompressedGroupElement>>,
|
||||
a_hat: G::Scalar,
|
||||
_p: PhantomData<G>,
|
||||
}
|
||||
|
||||
impl<G: Group> InnerProductArgument<G> {
|
||||
fn protocol_name() -> &'static [u8] {
|
||||
b"inner product argument"
|
||||
}
|
||||
|
||||
pub fn prove(
|
||||
gens: &CommitGens<G>,
|
||||
gens_c: &CommitGens<G>,
|
||||
U: &InnerProductInstance<G>,
|
||||
W: &InnerProductWitness<G>,
|
||||
transcript: &mut Transcript,
|
||||
) -> Result<Self, NovaError> {
|
||||
transcript.append_message(b"protocol-name", Self::protocol_name());
|
||||
|
||||
if U.b_vec.len() != W.a_vec.len() {
|
||||
return Err(NovaError::InvalidInputLength);
|
||||
}
|
||||
|
||||
U.comm_a_vec.append_to_transcript(b"comm_a_vec", transcript);
|
||||
U.b_vec.append_to_transcript(b"b_vec", transcript);
|
||||
U.c.append_to_transcript(b"c", transcript);
|
||||
|
||||
// sample a random base for committing to the inner product
|
||||
let r = G::Scalar::challenge(b"r", transcript);
|
||||
let gens_c = gens_c.scale(&r);
|
||||
|
||||
// a closure that executes a step of the recursive inner product argument
|
||||
let prove_inner = |a_vec: &[G::Scalar],
|
||||
b_vec: &[G::Scalar],
|
||||
gens: &CommitGens<G>,
|
||||
transcript: &mut Transcript|
|
||||
-> Result<
|
||||
(
|
||||
CompressedCommitment<G::CompressedGroupElement>,
|
||||
CompressedCommitment<G::CompressedGroupElement>,
|
||||
Vec<G::Scalar>,
|
||||
Vec<G::Scalar>,
|
||||
CommitGens<G>,
|
||||
),
|
||||
NovaError,
|
||||
> {
|
||||
let n = a_vec.len();
|
||||
let (gens_L, gens_R) = gens.split_at(n / 2);
|
||||
|
||||
let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]);
|
||||
let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]);
|
||||
|
||||
let L = a_vec[0..n / 2]
|
||||
.iter()
|
||||
.chain(iter::once(&c_L))
|
||||
.copied()
|
||||
.collect::<Vec<G::Scalar>>()
|
||||
.commit(&gens_R.combine(&gens_c))
|
||||
.compress();
|
||||
let R = a_vec[n / 2..n]
|
||||
.iter()
|
||||
.chain(iter::once(&c_R))
|
||||
.copied()
|
||||
.collect::<Vec<G::Scalar>>()
|
||||
.commit(&gens_L.combine(&gens_c))
|
||||
.compress();
|
||||
|
||||
L.append_to_transcript(b"L", transcript);
|
||||
R.append_to_transcript(b"R", transcript);
|
||||
|
||||
let r = G::Scalar::challenge(b"challenge_r", transcript);
|
||||
let r_inverse = r.invert().unwrap();
|
||||
|
||||
// fold the left half and the right half
|
||||
let a_vec_folded = a_vec[0..n / 2]
|
||||
.par_iter()
|
||||
.zip(a_vec[n / 2..n].par_iter())
|
||||
.map(|(a_L, a_R)| *a_L * r + r_inverse * *a_R)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
let b_vec_folded = b_vec[0..n / 2]
|
||||
.par_iter()
|
||||
.zip(b_vec[n / 2..n].par_iter())
|
||||
.map(|(b_L, b_R)| *b_L * r_inverse + r * *b_R)
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
let gens_folded = gens.fold(&r_inverse, &r);
|
||||
|
||||
Ok((L, R, a_vec_folded, b_vec_folded, gens_folded))
|
||||
};
|
||||
|
||||
// two vectors to hold the logarithmic number of group elements
|
||||
let mut L_vec: Vec<CompressedCommitment<G::CompressedGroupElement>> = Vec::new();
|
||||
let mut R_vec: Vec<CompressedCommitment<G::CompressedGroupElement>> = Vec::new();
|
||||
|
||||
// we create mutable copies of vectors and generators
|
||||
let mut a_vec = W.a_vec.to_vec();
|
||||
let mut b_vec = U.b_vec.to_vec();
|
||||
let mut gens = gens.clone();
|
||||
for _i in 0..(U.b_vec.len() as f64).log2() as usize {
|
||||
let (L, R, a_vec_folded, b_vec_folded, gens_folded) =
|
||||
prove_inner(&a_vec, &b_vec, &gens, transcript)?;
|
||||
L_vec.push(L);
|
||||
R_vec.push(R);
|
||||
|
||||
a_vec = a_vec_folded;
|
||||
b_vec = b_vec_folded;
|
||||
gens = gens_folded;
|
||||
}
|
||||
|
||||
Ok(InnerProductArgument {
|
||||
L_vec,
|
||||
R_vec,
|
||||
a_hat: a_vec[0],
|
||||
_p: Default::default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn verify(
|
||||
&self,
|
||||
gens: &CommitGens<G>,
|
||||
gens_c: &CommitGens<G>,
|
||||
n: usize,
|
||||
U: &InnerProductInstance<G>,
|
||||
transcript: &mut Transcript,
|
||||
) -> Result<(), NovaError> {
|
||||
transcript.append_message(b"protocol-name", Self::protocol_name());
|
||||
if U.b_vec.len() != n
|
||||
|| n != (1 << self.L_vec.len())
|
||||
|| self.L_vec.len() != self.R_vec.len()
|
||||
|| self.L_vec.len() >= 32
|
||||
{
|
||||
return Err(NovaError::InvalidInputLength);
|
||||
}
|
||||
|
||||
U.comm_a_vec.append_to_transcript(b"comm_a_vec", transcript);
|
||||
U.b_vec.append_to_transcript(b"b_vec", transcript);
|
||||
U.c.append_to_transcript(b"c", transcript);
|
||||
|
||||
// sample a random base for committing to the inner product
|
||||
let r = G::Scalar::challenge(b"r", transcript);
|
||||
let gens_c = gens_c.scale(&r);
|
||||
|
||||
let P = U.comm_a_vec + [U.c].commit(&gens_c);
|
||||
|
||||
let batch_invert = |v: &[G::Scalar]| -> Result<Vec<G::Scalar>, NovaError> {
|
||||
let mut products = vec![G::Scalar::zero(); v.len()];
|
||||
let mut acc = G::Scalar::one();
|
||||
|
||||
for i in 0..v.len() {
|
||||
products[i] = acc;
|
||||
acc *= v[i];
|
||||
}
|
||||
|
||||
// we can compute an inversion only if acc is non-zero
|
||||
if acc == G::Scalar::zero() {
|
||||
return Err(NovaError::InvalidInputLength);
|
||||
}
|
||||
|
||||
// compute the inverse once for all entries
|
||||
acc = acc.invert().unwrap();
|
||||
|
||||
let mut inv = vec![G::Scalar::zero(); v.len()];
|
||||
for i in 0..v.len() {
|
||||
let tmp = acc * v[v.len() - 1 - i];
|
||||
inv[v.len() - 1 - i] = products[v.len() - 1 - i] * acc;
|
||||
acc = tmp;
|
||||
}
|
||||
|
||||
Ok(inv)
|
||||
};
|
||||
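// The closure above is Montgomery's batch-inversion trick: one field inversion plus O(n)
// multiplications. A self-contained sketch of the same idea over a toy prime field
// (u64 arithmetic mod 65537; purely illustrative, not part of the diff):
const P_TOY: u64 = 65537;

fn pow_mod_toy(mut b: u64, mut e: u64) -> u64 {
  // square-and-multiply exponentiation mod P_TOY
  let mut acc = 1u64;
  while e > 0 {
    if e & 1 == 1 {
      acc = acc * b % P_TOY;
    }
    b = b * b % P_TOY;
    e >>= 1;
  }
  acc
}

fn batch_invert_toy(v: &[u64]) -> Option<Vec<u64>> {
  // prefix products: products[i] = v[0] * ... * v[i-1]
  let mut products = vec![1u64; v.len()];
  let mut acc = 1u64;
  for i in 0..v.len() {
    products[i] = acc;
    acc = acc * (v[i] % P_TOY) % P_TOY;
  }
  // inversion is possible only if no entry is zero
  if acc == 0 {
    return None;
  }
  // a single inversion of the running product (Fermat's little theorem)
  acc = pow_mod_toy(acc, P_TOY - 2);
  // peel off one factor per step, back to front
  let mut inv = vec![0u64; v.len()];
  for i in (0..v.len()).rev() {
    inv[i] = products[i] * acc % P_TOY;
    acc = acc * (v[i] % P_TOY) % P_TOY;
  }
  Some(inv)
}
// end of illustrative sketch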
|
||||
// compute a vector of public coins using self.L_vec and self.R_vec
|
||||
let r = (0..self.L_vec.len())
|
||||
.map(|i| {
|
||||
self.L_vec[i].append_to_transcript(b"L", transcript);
|
||||
self.R_vec[i].append_to_transcript(b"R", transcript);
|
||||
G::Scalar::challenge(b"challenge_r", transcript)
|
||||
})
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
|
||||
// precompute scalars necessary for verification
|
||||
let r_square: Vec<G::Scalar> = (0..self.L_vec.len())
|
||||
.into_par_iter()
|
||||
.map(|i| r[i] * r[i])
|
||||
.collect();
|
||||
let r_inverse = batch_invert(&r)?;
|
||||
let r_inverse_square: Vec<G::Scalar> = (0..self.L_vec.len())
|
||||
.into_par_iter()
|
||||
.map(|i| r_inverse[i] * r_inverse[i])
|
||||
.collect();
|
||||
|
||||
// compute the vector with the tensor structure
|
||||
let s = {
|
||||
let mut s = vec![G::Scalar::zero(); n];
|
||||
s[0] = {
|
||||
let mut v = G::Scalar::one();
|
||||
for r_inverse_i in &r_inverse {
|
||||
v *= r_inverse_i;
|
||||
}
|
||||
v
|
||||
};
|
||||
for i in 1..n {
|
||||
let pos_in_r = (31 - (i as u32).leading_zeros()) as usize;
|
||||
s[i] = s[i - (1 << pos_in_r)] * r_square[(self.L_vec.len() - 1) - pos_in_r];
|
||||
}
|
||||
s
|
||||
};
|
||||
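// Structure of `s` (restated for exposition; not part of the diff): with k = self.L_vec.len()
// rounds and i written in binary as b_0 b_1 ... b_{k-1} (b_0 the most significant bit),
//     s_i = prod_j r_j^(2*b_j - 1),
// i.e. the j-th round challenge contributes r_j when b_j = 1 and r_j^{-1} when b_j = 0.
// s[0] is the all-inverse product, and each later entry reuses an earlier one times r_j^2.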
|
||||
let gens_hat = {
|
||||
let c = s.commit(gens).compress();
|
||||
CommitGens::reinterpret_commitments_as_gens(&[c])?
|
||||
};
|
||||
|
||||
let b_hat = inner_product(&U.b_vec, &s);
|
||||
|
||||
let P_hat = {
|
||||
let gens_folded = {
|
||||
let gens_L = CommitGens::reinterpret_commitments_as_gens(&self.L_vec)?;
|
||||
let gens_R = CommitGens::reinterpret_commitments_as_gens(&self.R_vec)?;
|
||||
let gens_P = CommitGens::reinterpret_commitments_as_gens(&[P.compress()])?;
|
||||
gens_L.combine(&gens_R).combine(&gens_P)
|
||||
};
|
||||
r_square
|
||||
.iter()
|
||||
.chain(r_inverse_square.iter())
|
||||
.chain(iter::once(&G::Scalar::one()))
|
||||
.copied()
|
||||
.collect::<Vec<G::Scalar>>()
|
||||
.commit(&gens_folded)
|
||||
};
|
||||
|
||||
if P_hat == [self.a_hat, self.a_hat * b_hat].commit(&gens_hat.combine(&gens_c)) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(NovaError::InvalidIPA)
|
||||
}
|
||||
}
|
||||
}
381  src/spartan_with_ipa_pc/mod.rs  Normal file
@@ -0,0 +1,381 @@
|
||||
//! This module implements RelaxedR1CSSNARKTrait using a Spartan variant
|
||||
//! instantiated with an IPA-based polynomial commitment scheme
|
||||
mod ipa;
|
||||
mod polynomial;
|
||||
mod sumcheck;
|
||||
|
||||
use super::{
|
||||
commitments::CommitGens,
|
||||
errors::NovaError,
|
||||
r1cs::{R1CSGens, R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
|
||||
snark::{ProverKeyTrait, RelaxedR1CSSNARKTrait, VerifierKeyTrait},
|
||||
traits::{AppendToTranscriptTrait, ChallengeTrait, Group},
|
||||
};
|
||||
use core::cmp::max;
|
||||
use ff::Field;
|
||||
use ipa::{InnerProductArgument, InnerProductInstance, InnerProductWitness, NIFSForInnerProduct};
|
||||
use itertools::concat;
|
||||
use merlin::Transcript;
|
||||
use polynomial::{EqPolynomial, MultilinearPolynomial, SparsePolynomial};
|
||||
use rayon::prelude::*;
|
||||
use sumcheck::SumcheckProof;
|
||||
|
||||
/// A type that represents the prover's key
|
||||
pub struct ProverKey<G: Group> {
|
||||
gens_r1cs: R1CSGens<G>,
|
||||
gens_ipa: CommitGens<G>,
|
||||
S: R1CSShape<G>,
|
||||
}
|
||||
|
||||
impl<G: Group> ProverKeyTrait<G> for ProverKey<G> {
|
||||
fn new(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self {
|
||||
ProverKey {
|
||||
gens_r1cs: gens.clone(),
|
||||
gens_ipa: CommitGens::new(b"ipa", 1),
|
||||
S: S.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A type that represents the verifier's key
|
||||
pub struct VerifierKey<G: Group> {
|
||||
gens_r1cs: R1CSGens<G>,
|
||||
gens_ipa: CommitGens<G>,
|
||||
S: R1CSShape<G>,
|
||||
}
|
||||
|
||||
impl<G: Group> VerifierKeyTrait<G> for VerifierKey<G> {
|
||||
fn new(gens: &R1CSGens<G>, S: &R1CSShape<G>) -> Self {
|
||||
VerifierKey {
|
||||
gens_r1cs: gens.clone(),
|
||||
gens_ipa: CommitGens::new(b"ipa", 1),
|
||||
S: S.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A succinct proof of knowledge of a witness to a relaxed R1CS instance
|
||||
/// The proof is produced using Spartan's combination of the sum-check and
|
||||
/// the commitment to a vector viewed as a polynomial commitment
|
||||
pub struct RelaxedR1CSSNARK<G: Group> {
|
||||
sc_proof_outer: SumcheckProof<G>,
|
||||
claims_outer: (G::Scalar, G::Scalar, G::Scalar),
|
||||
sc_proof_inner: SumcheckProof<G>,
|
||||
eval_E: G::Scalar,
|
||||
eval_W: G::Scalar,
|
||||
nifs_ip: NIFSForInnerProduct<G>,
|
||||
ipa: InnerProductArgument<G>,
|
||||
}
|
||||
|
||||
impl<G: Group> RelaxedR1CSSNARKTrait<G> for RelaxedR1CSSNARK<G> {
|
||||
type ProverKey = ProverKey<G>;
|
||||
type VerifierKey = VerifierKey<G>;
|
||||
|
||||
/// produces a succinct proof of satisfiability of a RelaxedR1CS instance
|
||||
fn prove(
|
||||
pk: &Self::ProverKey,
|
||||
U: &RelaxedR1CSInstance<G>,
|
||||
W: &RelaxedR1CSWitness<G>,
|
||||
) -> Result<Self, NovaError> {
|
||||
let mut transcript = Transcript::new(b"RelaxedR1CSSNARK");
|
||||
|
||||
debug_assert!(pk.S.is_sat_relaxed(&pk.gens_r1cs, U, W).is_ok());
|
||||
|
||||
// sanity check that R1CSShape has certain size characteristics
|
||||
assert_eq!(pk.S.num_cons.next_power_of_two(), pk.S.num_cons);
|
||||
assert_eq!(pk.S.num_vars.next_power_of_two(), pk.S.num_vars);
|
||||
assert_eq!(pk.S.num_io.next_power_of_two(), pk.S.num_io);
|
||||
assert!(pk.S.num_io < pk.S.num_vars);
|
||||
|
||||
// append the R1CSShape and RelaxedR1CSInstance to the transcript
|
||||
pk.S.append_to_transcript(b"S", &mut transcript);
|
||||
U.append_to_transcript(b"U", &mut transcript);
|
||||
|
||||
// compute the full satisfying assignment by concatenating W.W, U.u, and U.X
|
||||
let mut z = concat(vec![W.W.clone(), vec![U.u], U.X.clone()]);
|
||||
|
||||
let (num_rounds_x, num_rounds_y) = (
|
||||
(pk.S.num_cons as f64).log2() as usize,
|
||||
((pk.S.num_vars as f64).log2() as usize + 1) as usize,
|
||||
);
|
||||
|
||||
// outer sum-check
|
||||
let tau = (0..num_rounds_x)
|
||||
.map(|_i| G::Scalar::challenge(b"challenge_tau", &mut transcript))
|
||||
.collect();
|
||||
|
||||
let mut poly_tau = MultilinearPolynomial::new(EqPolynomial::new(tau).evals());
|
||||
let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = {
|
||||
let (poly_Az, poly_Bz, poly_Cz) = pk.S.multiply_vec(&z)?;
|
||||
let poly_uCz_E = (0..pk.S.num_cons)
|
||||
.map(|i| U.u * poly_Cz[i] + W.E[i])
|
||||
.collect::<Vec<G::Scalar>>();
|
||||
(
|
||||
MultilinearPolynomial::new(poly_Az),
|
||||
MultilinearPolynomial::new(poly_Bz),
|
||||
MultilinearPolynomial::new(poly_Cz),
|
||||
MultilinearPolynomial::new(poly_uCz_E),
|
||||
)
|
||||
};
|
||||
|
||||
let comb_func_outer =
|
||||
|poly_A_comp: &G::Scalar,
|
||||
poly_B_comp: &G::Scalar,
|
||||
poly_C_comp: &G::Scalar,
|
||||
poly_D_comp: &G::Scalar|
|
||||
-> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) };
|
||||
let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term(
|
||||
&G::Scalar::zero(), // claim is zero
|
||||
num_rounds_x,
|
||||
&mut poly_tau,
|
||||
&mut poly_Az,
|
||||
&mut poly_Bz,
|
||||
&mut poly_uCz_E,
|
||||
comb_func_outer,
|
||||
&mut transcript,
|
||||
);
|
||||
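// The outer claim being proven (restated for exposition; not part of the diff): a satisfying
// relaxed R1CS pair obeys Az o Bz = u*Cz + E entrywise, so over the Boolean hypercube
//     sum_{x in {0,1}^{log m}} eq(tau, x) * ( Az(x)*Bz(x) - (u*Cz(x) + E(x)) ) = 0,
// which is why the sum-check starts from the claim zero with the cubic combination
// tau * (A*B - D), where D is the `poly_uCz_E` table built above.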
|
||||
// claims from the end of sum-check
|
||||
let (claim_Az, claim_Bz): (G::Scalar, G::Scalar) = (claims_outer[1], claims_outer[2]);
|
||||
|
||||
claim_Az.append_to_transcript(b"claim_Az", &mut transcript);
|
||||
claim_Bz.append_to_transcript(b"claim_Bz", &mut transcript);
|
||||
let claim_Cz = poly_Cz.evaluate(&r_x);
|
||||
let eval_E = MultilinearPolynomial::new(W.E.clone()).evaluate(&r_x);
|
||||
claim_Cz.append_to_transcript(b"claim_Cz", &mut transcript);
|
||||
eval_E.append_to_transcript(b"eval_E", &mut transcript);
|
||||
|
||||
// inner sum-check
|
||||
let r_A = G::Scalar::challenge(b"challenge_rA", &mut transcript);
|
||||
let r_B = G::Scalar::challenge(b"challenge_rB", &mut transcript);
|
||||
let r_C = G::Scalar::challenge(b"challenge_rC", &mut transcript);
|
||||
let claim_inner_joint = r_A * claim_Az + r_B * claim_Bz + r_C * claim_Cz;
|
||||
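// The inner sum-check (restated for exposition; not part of the diff) proves
//     r_A*Az(r_x) + r_B*Bz(r_x) + r_C*Cz(r_x)
//         = sum_y ( r_A*A(r_x, y) + r_B*B(r_x, y) + r_C*C(r_x, y) ) * Z(y),
// where the parenthesized term is materialized below as `poly_ABC` and Z as `poly_z`.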
|
||||
let poly_ABC = {
|
||||
// compute the initial evaluation table for R(\tau, x)
|
||||
let evals_rx = EqPolynomial::new(r_x.clone()).evals();
|
||||
|
||||
// Bounds "row" variables of (A, B, C) matrices viewed as 2d multilinear polynomials
|
||||
let compute_eval_table_sparse =
|
||||
|S: &R1CSShape<G>, rx: &[G::Scalar]| -> (Vec<G::Scalar>, Vec<G::Scalar>, Vec<G::Scalar>) {
|
||||
assert_eq!(rx.len(), S.num_cons);
|
||||
|
||||
let inner = |M: &Vec<(usize, usize, G::Scalar)>, M_evals: &mut Vec<G::Scalar>| {
|
||||
for (row, col, val) in M {
|
||||
M_evals[*col] += rx[*row] * val;
|
||||
}
|
||||
};
|
||||
|
||||
let (A_evals, (B_evals, C_evals)) = rayon::join(
|
||||
|| {
|
||||
let mut A_evals: Vec<G::Scalar> = vec![G::Scalar::zero(); 2 * S.num_vars];
|
||||
inner(&S.A, &mut A_evals);
|
||||
A_evals
|
||||
},
|
||||
|| {
|
||||
rayon::join(
|
||||
|| {
|
||||
let mut B_evals: Vec<G::Scalar> = vec![G::Scalar::zero(); 2 * S.num_vars];
|
||||
inner(&S.B, &mut B_evals);
|
||||
B_evals
|
||||
},
|
||||
|| {
|
||||
let mut C_evals: Vec<G::Scalar> = vec![G::Scalar::zero(); 2 * S.num_vars];
|
||||
inner(&S.C, &mut C_evals);
|
||||
C_evals
|
||||
},
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
(A_evals, B_evals, C_evals)
|
||||
};
|
||||
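// In other words (for exposition; not part of the diff): for each column index y,
//     evals_A[y] = sum_x eq(r_x, x) * A[x][y],
// i.e. the sparse matrix A, viewed as a 2-d multilinear polynomial, partially evaluated
// ("bound") at the row point r_x; likewise for B and C.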
|
||||
let (evals_A, evals_B, evals_C) = compute_eval_table_sparse(&pk.S, &evals_rx);
|
||||
|
||||
assert_eq!(evals_A.len(), evals_B.len());
|
||||
assert_eq!(evals_A.len(), evals_C.len());
|
||||
(0..evals_A.len())
|
||||
.into_par_iter()
|
||||
.map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
|
||||
.collect::<Vec<G::Scalar>>()
|
||||
};
|
||||
|
||||
let poly_z = {
|
||||
z.resize(pk.S.num_vars * 2, G::Scalar::zero());
|
||||
z
|
||||
};
|
||||
|
||||
let comb_func = |poly_A_comp: &G::Scalar, poly_B_comp: &G::Scalar| -> G::Scalar {
|
||||
*poly_A_comp * *poly_B_comp
|
||||
};
|
||||
let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad(
|
||||
&claim_inner_joint,
|
||||
num_rounds_y,
|
||||
&mut MultilinearPolynomial::new(poly_ABC),
|
||||
&mut MultilinearPolynomial::new(poly_z),
|
||||
comb_func,
|
||||
&mut transcript,
|
||||
);
|
||||
|
||||
let eval_W = MultilinearPolynomial::new(W.W.clone()).evaluate(&r_y[1..]);
|
||||
eval_W.append_to_transcript(b"eval_W", &mut transcript);
|
||||
|
||||
let (nifs_ip, r_U, r_W) = NIFSForInnerProduct::prove(
|
||||
&InnerProductInstance::new(&U.comm_E, &EqPolynomial::new(r_x).evals(), &eval_E),
|
||||
&InnerProductWitness::new(&W.E),
|
||||
&InnerProductInstance::new(
|
||||
&U.comm_W,
|
||||
&EqPolynomial::new(r_y[1..].to_vec()).evals(),
|
||||
&eval_W,
|
||||
),
|
||||
&InnerProductWitness::new(&W.W),
|
||||
&mut transcript,
|
||||
);
|
||||
|
||||
let ipa = InnerProductArgument::prove(
|
||||
&pk.gens_r1cs.gens,
|
||||
&pk.gens_ipa,
|
||||
&r_U,
|
||||
&r_W,
|
||||
&mut transcript,
|
||||
)?;
|
||||
|
||||
Ok(RelaxedR1CSSNARK {
|
||||
sc_proof_outer,
|
||||
claims_outer: (claim_Az, claim_Bz, claim_Cz),
|
||||
sc_proof_inner,
|
||||
eval_W,
|
||||
eval_E,
|
||||
nifs_ip,
|
||||
ipa,
|
||||
})
|
||||
}
|
||||
|
||||
/// verifies a proof of satisfiability of a RelaxedR1CS instance
|
||||
fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<G>) -> Result<(), NovaError> {
|
||||
let mut transcript = Transcript::new(b"RelaxedR1CSSNARK");
|
||||
|
||||
// append the R1CSShape and RelaxedR1CSInstance to the transcript
|
||||
vk.S.append_to_transcript(b"S", &mut transcript);
|
||||
U.append_to_transcript(b"U", &mut transcript);
|
||||
|
||||
let (num_rounds_x, num_rounds_y) = (
(vk.S.num_cons as f64).log2() as usize,
((vk.S.num_vars as f64).log2() as usize + 1) as usize,
);

// outer sum-check
let tau = (0..num_rounds_x)
.map(|_i| G::Scalar::challenge(b"challenge_tau", &mut transcript))
.collect::<Vec<G::Scalar>>();

let (claim_outer_final, r_x) =
self
.sc_proof_outer
.verify(G::Scalar::zero(), num_rounds_x, 3, &mut transcript)?;

// verify claim_outer_final
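// The outer sum-check establishes \sum_x eq(tau, x) * (Az~(x) * Bz~(x) - u * Cz~(x) - E~(x)) = 0,
// so the final claim must equal eq(tau, r_x) * (claim_Az * claim_Bz - u * claim_Cz - eval_E).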
let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer;
let taus_bound_rx = EqPolynomial::new(tau).evaluate(&r_x);
let claim_outer_final_expected =
taus_bound_rx * (claim_Az * claim_Bz - U.u * claim_Cz - self.eval_E);
if claim_outer_final != claim_outer_final_expected {
return Err(NovaError::InvalidSumcheckProof);
}

self
.claims_outer
.0
.append_to_transcript(b"claim_Az", &mut transcript);
self
.claims_outer
.1
.append_to_transcript(b"claim_Bz", &mut transcript);
self
.claims_outer
.2
.append_to_transcript(b"claim_Cz", &mut transcript);
self.eval_E.append_to_transcript(b"eval_E", &mut transcript);

// inner sum-check
let r_A = G::Scalar::challenge(b"challenge_rA", &mut transcript);
let r_B = G::Scalar::challenge(b"challenge_rB", &mut transcript);
let r_C = G::Scalar::challenge(b"challenge_rC", &mut transcript);
let claim_inner_joint =
r_A * self.claims_outer.0 + r_B * self.claims_outer.1 + r_C * self.claims_outer.2;

let (claim_inner_final, r_y) =
self
.sc_proof_inner
.verify(claim_inner_joint, num_rounds_y, 2, &mut transcript)?;

// verify claim_inner_final
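// Z~(r_y) is assembled from the two halves of Z: the witness evaluation eval_W supplied by the
// prover and the public part (with the constant term u) evaluated directly by the verifier.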
let eval_Z = {
let eval_X = {
// constant term
let mut poly_X = vec![(0, U.u)];
// remaining inputs
poly_X.extend(
(0..U.X.len())
.map(|i| (i + 1, U.X[i]))
.collect::<Vec<(usize, G::Scalar)>>(),
);
SparsePolynomial::new((vk.S.num_vars as f64).log2() as usize, poly_X)
.evaluate(&r_y[1..].to_vec())
};
(G::Scalar::one() - r_y[0]) * self.eval_W + r_y[0] * eval_X
};

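// evaluate_as_sparse_polynomial computes A~(r_x, r_y), B~(r_x, r_y), and C~(r_x, r_y) by summing
// val * eq(r_x, row) * eq(r_y, col) over the nonzero entries of each matrix.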
let evaluate_as_sparse_polynomial = |S: &R1CSShape<G>,
r_x: &[G::Scalar],
r_y: &[G::Scalar]|
-> (G::Scalar, G::Scalar, G::Scalar) {
let evaluate_with_table =
|M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar {
(0..M.len())
.map(|i| {
let (row, col, val) = M[i];
T_x[row] * T_y[col] * val
})
.fold(G::Scalar::zero(), |acc, x| acc + x)
};

let T_x = EqPolynomial::new(r_x.to_vec()).evals();
let T_y = EqPolynomial::new(r_y.to_vec()).evals();
let eval_A_r = evaluate_with_table(&S.A, &T_x, &T_y);
let eval_B_r = evaluate_with_table(&S.B, &T_x, &T_y);
let eval_C_r = evaluate_with_table(&S.C, &T_x, &T_y);
(eval_A_r, eval_B_r, eval_C_r)
};

let (eval_A_r, eval_B_r, eval_C_r) = evaluate_as_sparse_polynomial(&vk.S, &r_x, &r_y);
let claim_inner_final_expected = (r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * eval_Z;
if claim_inner_final != claim_inner_final_expected {
return Err(NovaError::InvalidSumcheckProof);
}

// verify eval_W and eval_E
self.eval_W.append_to_transcript(b"eval_W", &mut transcript); // eval_E is already in the transcript

let r_U = self.nifs_ip.verify(
&InnerProductInstance::new(&U.comm_E, &EqPolynomial::new(r_x).evals(), &self.eval_E),
&InnerProductInstance::new(
&U.comm_W,
&EqPolynomial::new(r_y[1..].to_vec()).evals(),
&self.eval_W,
),
&mut transcript,
);

self.ipa.verify(
&vk.gens_r1cs.gens,
&vk.gens_ipa,
max(vk.S.num_vars, vk.S.num_cons),
&r_U,
&mut transcript,
)?;

Ok(())
}
}
150 src/spartan_with_ipa_pc/polynomial.rs Normal file
@@ -0,0 +1,150 @@
use core::ops::Index;
use ff::PrimeField;
use rayon::prelude::*;

pub struct EqPolynomial<Scalar: PrimeField> {
r: Vec<Scalar>,
}

impl<Scalar: PrimeField> EqPolynomial<Scalar> {
pub fn new(r: Vec<Scalar>) -> Self {
EqPolynomial { r }
}

pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
assert_eq!(self.r.len(), rx.len());
(0..rx.len())
.map(|i| rx[i] * self.r[i] + (Scalar::one() - rx[i]) * (Scalar::one() - self.r[i]))
.fold(Scalar::one(), |acc, item| acc * item)
}

pub fn evals(&self) -> Vec<Scalar> {
let ell = self.r.len();
let mut evals: Vec<Scalar> = vec![Scalar::zero(); (2_usize).pow(ell as u32) as usize];
let mut size = 1;
evals[0] = Scalar::one();

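// The eq table is built one variable at a time: each pass splits every filled entry x into
// (x * (1 - r), x * r), doubling the number of filled entries until all 2^ell values are present.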
for r in self.r.iter().rev() {
let (evals_left, evals_right) = evals.split_at_mut(size);
let (evals_right, _) = evals_right.split_at_mut(size);

evals_left
.par_iter_mut()
.zip(evals_right.par_iter_mut())
.for_each(|(x, y)| {
*y = *x * r;
*x -= &*y;
});

size *= 2;
}
evals
}
}

#[derive(Debug)]
pub struct MultilinearPolynomial<Scalar: PrimeField> {
num_vars: usize, // the number of variables in the multilinear polynomial
Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}

impl<Scalar: PrimeField> MultilinearPolynomial<Scalar> {
pub fn new(Z: Vec<Scalar>) -> Self {
assert_eq!(Z.len(), (2_usize).pow((Z.len() as f64).log2() as u32));
MultilinearPolynomial {
num_vars: (Z.len() as f64).log2() as usize,
Z,
}
}

pub fn get_num_vars(&self) -> usize {
self.num_vars
}

pub fn len(&self) -> usize {
self.Z.len()
}

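// bound_poly_var_top fixes the top variable to r: the table shrinks to
// Z'[i] = (1 - r) * Z[i] + r * Z[i + n], computed below as Z[i] + r * (Z[i + n] - Z[i]).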
pub fn bound_poly_var_top(&mut self, r: &Scalar) {
let n = self.len() / 2;

let (left, right) = self.Z.split_at_mut(n);
let (right, _) = right.split_at(n);

left
.par_iter_mut()
.zip(right.par_iter())
.for_each(|(a, b)| {
*a += *r * (*b - *a);
});

self.Z.resize(n, Scalar::zero());
self.num_vars -= 1;
}

// returns Z(r) in O(n) time
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
// r must have a value for each variable
assert_eq!(r.len(), self.get_num_vars());
let chis = EqPolynomial::new(r.to_vec()).evals();
assert_eq!(chis.len(), self.Z.len());

(0..chis.len())
.into_par_iter()
.map(|i| chis[i] * self.Z[i])
.reduce(Scalar::zero, |x, y| x + y)
}
}

impl<Scalar: PrimeField> Index<usize> for MultilinearPolynomial<Scalar> {
type Output = Scalar;

#[inline(always)]
fn index(&self, _index: usize) -> &Scalar {
&(self.Z[_index])
}
}

pub struct SparsePolynomial<Scalar: PrimeField> {
num_vars: usize,
Z: Vec<(usize, Scalar)>,
}

impl<Scalar: PrimeField> SparsePolynomial<Scalar> {
pub fn new(num_vars: usize, Z: Vec<(usize, Scalar)>) -> Self {
SparsePolynomial { num_vars, Z }
}

fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar {
assert_eq!(a.len(), r.len());
let mut chi_i = Scalar::one();
for j in 0..r.len() {
if a[j] {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
chi_i
}

// Takes O(n log n). TODO: do this in O(n) where n is the number of entries in Z
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.num_vars, r.len());

let get_bits = |num: usize, num_bits: usize| -> Vec<bool> {
(0..num_bits)
.into_par_iter()
.map(|shift_amount| ((num & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
};

(0..self.Z.len())
.into_par_iter()
.map(|i| {
let bits = get_bits(self.Z[i].0, r.len());
SparsePolynomial::compute_chi(&bits, r) * self.Z[i].1
})
.reduce(Scalar::zero, |x, y| x + y)
}
}
331 src/spartan_with_ipa_pc/sumcheck.rs Normal file
@@ -0,0 +1,331 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
use super::polynomial::MultilinearPolynomial;
use crate::errors::NovaError;
use crate::traits::{AppendToTranscriptTrait, ChallengeTrait, Group};
use core::marker::PhantomData;
use ff::Field;
use merlin::Transcript;
use rayon::prelude::*;

#[derive(Debug)]
pub struct SumcheckProof<G: Group> {
compressed_polys: Vec<CompressedUniPoly<G>>,
}

impl<G: Group> SumcheckProof<G> {
pub fn verify(
&self,
claim: G::Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> Result<(G::Scalar, Vec<G::Scalar>), NovaError> {
let mut e = claim;
let mut r: Vec<G::Scalar> = Vec::new();

// verify that there is a univariate polynomial for each round
if self.compressed_polys.len() != num_rounds {
return Err(NovaError::InvalidSumcheckProof);
}

for i in 0..self.compressed_polys.len() {
let poly = self.compressed_polys[i].decompress(&e);

// verify degree bound
if poly.degree() != degree_bound {
return Err(NovaError::InvalidSumcheckProof);
}

// check if G_k(0) + G_k(1) = e
if poly.eval_at_zero() + poly.eval_at_one() != e {
return Err(NovaError::InvalidSumcheckProof);
}

// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);

// derive the verifier's challenge for the next round
let r_i = G::Scalar::challenge(b"challenge_nextround", transcript);

r.push(r_i);

// evaluate the claimed degree-ell polynomial at r_i
e = poly.evaluate(&r_i);
}

Ok((e, r))
}

pub fn prove_quad<F>(
claim: &G::Scalar,
num_rounds: usize,
poly_A: &mut MultilinearPolynomial<G::Scalar>,
poly_B: &mut MultilinearPolynomial<G::Scalar>,
comb_func: F,
transcript: &mut Transcript,
) -> (Self, Vec<G::Scalar>, Vec<G::Scalar>)
where
F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync,
{
let mut r: Vec<G::Scalar> = Vec::new();
let mut polys: Vec<CompressedUniPoly<G>> = Vec::new();
let mut claim_per_round = *claim;
for _ in 0..num_rounds {
let poly = {
let len = poly_A.len() / 2;

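// The round polynomial has degree 2, so three evaluations determine it: the value at 0 comes
// from the low half of each table, the value at 1 is recovered as claim_per_round - eval(0),
// and the value at 2 uses the linearly extrapolated entries 2 * high - low.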
// Make an iterator returning the contributions to the evaluations
let (eval_point_0, eval_point_2) = (0..len)
.into_par_iter()
.map(|i| {
// eval 0: bound_func is A(low)
let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point);
(eval_point_0, eval_point_2)
})
.reduce(
|| (G::Scalar::zero(), G::Scalar::zero()),
|a, b| (a.0 + b.0, a.1 + b.1),
);

let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
UniPoly::from_evals(&evals)
};

// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);

// derive the verifier's challenge for the next round
let r_i = G::Scalar::challenge(b"challenge_nextround", transcript);
r.push(r_i);
polys.push(poly.compress());

// Set up next round
claim_per_round = poly.evaluate(&r_i);

// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_i);
poly_B.bound_poly_var_top(&r_i);
}

(
SumcheckProof {
compressed_polys: polys,
},
r,
vec![poly_A[0], poly_B[0]],
)
}

pub fn prove_cubic_with_additive_term<F>(
claim: &G::Scalar,
num_rounds: usize,
poly_A: &mut MultilinearPolynomial<G::Scalar>,
poly_B: &mut MultilinearPolynomial<G::Scalar>,
poly_C: &mut MultilinearPolynomial<G::Scalar>,
poly_D: &mut MultilinearPolynomial<G::Scalar>,
comb_func: F,
transcript: &mut Transcript,
) -> (Self, Vec<G::Scalar>, Vec<G::Scalar>)
where
F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync,
{
let mut r: Vec<G::Scalar> = Vec::new();
let mut polys: Vec<CompressedUniPoly<G>> = Vec::new();
let mut claim_per_round = *claim;

for _ in 0..num_rounds {
let poly = {
let len = poly_A.len() / 2;

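// Here the round polynomial has degree 3, so four evaluations are needed: 0, 1 (recovered from
// the running claim), 2, and 3, with the bound points at 3 computed incrementally from those at 2.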
// Make an iterator returning the contributions to the evaluations
let (eval_point_0, eval_point_2, eval_point_3) = (0..len)
.into_par_iter()
.map(|i| {
// eval 0: bound_func is A(low)
let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
let eval_point_2 = comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);

// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
let eval_point_3 = comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
(eval_point_0, eval_point_2, eval_point_3)
})
.reduce(
|| (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()),
|a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2),
);

let evals = vec![
eval_point_0,
claim_per_round - eval_point_0,
eval_point_2,
eval_point_3,
];
UniPoly::from_evals(&evals)
};

// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);

// derive the verifier's challenge for the next round
let r_i = G::Scalar::challenge(b"challenge_nextround", transcript);
r.push(r_i);
polys.push(poly.compress());

// Set up next round
claim_per_round = poly.evaluate(&r_i);

// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_i);
poly_B.bound_poly_var_top(&r_i);
poly_C.bound_poly_var_top(&r_i);
poly_D.bound_poly_var_top(&r_i);
}

(
SumcheckProof {
compressed_polys: polys,
},
r,
vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
)
}
}

// ax^2 + bx + c stored as vec![c, b, a] (constant term first)
// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a]
#[derive(Debug)]
pub struct UniPoly<G: Group> {
coeffs: Vec<G::Scalar>,
}

// ax^2 + bx + c stored as vec![c, a] (linear term omitted)
// ax^3 + bx^2 + cx + d stored as vec![d, b, a]
#[derive(Debug)]
pub struct CompressedUniPoly<G: Group> {
coeffs_except_linear_term: Vec<G::Scalar>,
_p: PhantomData<G>,
}

impl<G: Group> UniPoly<G> {
pub fn from_evals(evals: &[G::Scalar]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
assert!(evals.len() == 3 || evals.len() == 4);
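// Interpolation from evaluations at 0, 1, 2 (and 3): in the quadratic case,
// c = p(0), a = (p(2) - 2*p(1) + p(0)) / 2, and b = p(1) - c - a; the cubic case uses the
// analogous finite-difference formulas with an additional 1/6 factor.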
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = G::Scalar::from(2).invert().unwrap();

let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
let b = evals[1] - c - a;
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = G::Scalar::from(2).invert().unwrap();
let six_inv = G::Scalar::from(6).invert().unwrap();

let d = evals[0];
let a = six_inv
* (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
let b = two_inv
* (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+ evals[2]
+ evals[2]
+ evals[2]
+ evals[2]
- evals[3]);
let c = evals[1] - d - a - b;
vec![d, c, b, a]
};

UniPoly { coeffs }
}

pub fn degree(&self) -> usize {
self.coeffs.len() - 1
}

pub fn eval_at_zero(&self) -> G::Scalar {
self.coeffs[0]
}

pub fn eval_at_one(&self) -> G::Scalar {
(0..self.coeffs.len())
.into_par_iter()
.map(|i| self.coeffs[i])
.reduce(G::Scalar::zero, |a, b| a + b)
}

pub fn evaluate(&self, r: &G::Scalar) -> G::Scalar {
let mut eval = self.coeffs[0];
let mut power = *r;
for coeff in self.coeffs.iter().skip(1) {
eval += power * coeff;
power *= r;
}
eval
}

pub fn compress(&self) -> CompressedUniPoly<G> {
let coeffs_except_linear_term = [&self.coeffs[0..1], &self.coeffs[2..]].concat();
assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
CompressedUniPoly {
coeffs_except_linear_term,
_p: Default::default(),
}
}
}

impl<G: Group> CompressedUniPoly<G> {
// we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
// linear_term = hint - 2 * constant_term - deg2 term - deg3 term
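// Dropping the linear coefficient saves one field element per compressed round polynomial;
// the verifier reconstructs it from the running claim when it calls decompress.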
pub fn decompress(&self, hint: &G::Scalar) -> UniPoly<G> {
let mut linear_term =
*hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
for i in 1..self.coeffs_except_linear_term.len() {
linear_term -= self.coeffs_except_linear_term[i];
}

let mut coeffs: Vec<G::Scalar> = Vec::new();
coeffs.extend(vec![&self.coeffs_except_linear_term[0]]);
coeffs.extend(vec![&linear_term]);
coeffs.extend(self.coeffs_except_linear_term[1..].to_vec());
assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
UniPoly { coeffs }
}
}

impl<G: Group> AppendToTranscriptTrait for UniPoly<G> {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
self.coeffs[i].append_to_transcript(b"coeff", transcript);
}
transcript.append_message(label, b"UniPoly_end");
}
}
@@ -19,18 +19,20 @@ pub trait Group:
+ GroupOpsOwned
+ ScalarMul<<Self as Group>::Scalar>
+ ScalarMulOwned<<Self as Group>::Scalar>
+ Send
+ Sync
{
/// A type representing an element of the base field of the group
type Base: PrimeField + PrimeFieldBits;

/// A type representing an element of the scalar field of the group
type Scalar: PrimeField + PrimeFieldBits + ChallengeTrait;
type Scalar: PrimeField + PrimeFieldBits + ChallengeTrait + Send + Sync;

/// A type representing the compressed version of the group element
type CompressedGroupElement: CompressedGroup<GroupElement = Self>;

/// A type representing preprocessed group element
type PreprocessedGroupElement;
type PreprocessedGroupElement: Clone + Send + Sync;

/// A type that represents a hash function that consumes elements
/// from the base field and squeezes out elements of the scalar field
@@ -45,6 +47,9 @@ pub trait Group:
/// Compresses the group element
fn compress(&self) -> Self::CompressedGroupElement;

/// Produces a preprocessed element
fn preprocessed(&self) -> Self::PreprocessedGroupElement;

/// Produce a vector of group elements using a static label
fn from_label(label: &'static [u8], n: usize) -> Vec<Self::PreprocessedGroupElement>;

@@ -88,7 +93,7 @@ pub trait ChallengeTrait {
/// A helper trait that defines the behavior of a hash function that we use as an RO
pub trait HashFuncTrait<Base, Scalar> {
/// A type representing constants/parameters associated with the hash function
type Constants: HashFuncConstantsTrait<Base> + Clone;
type Constants: HashFuncConstantsTrait<Base> + Clone + Send + Sync;

/// Initializes the hash function
fn new(constants: Self::Constants) -> Self;
@@ -135,7 +140,7 @@ pub trait ScalarMulOwned<Rhs, Output = Self>: for<'r> ScalarMul<&'r Rhs, Output>
impl<T, Rhs, Output> ScalarMulOwned<Rhs, Output> for T where T: for<'r> ScalarMul<&'r Rhs, Output> {}

/// A helper trait for a step of the incremental computation (i.e., circuit for F)
pub trait StepCircuit<F: PrimeField> {
pub trait StepCircuit<F: PrimeField>: Send + Sync + Clone {
/// Synthesize the circuit for a computation step and return variable
/// that corresponds to the output of the step z_{i+1}
fn synthesize<CS: ConstraintSystem<F>>(