Add solidity groth16, kzg10 and final decider verifiers in a dedicated workspace (#70)

* change: Refactor structure into workspace

* chore: Add empty readme

* change: Transform repo into workspace

* add: Create folding-verifier-solidity crate

* add: Include askama.toml for `sol` extension escaper

* add: Jordi's old Groth16 verifier .sol template and adapt it

* tmp: create simple template struct to test

* Update FoldingSchemes trait, fit Nova+CycleFold

- update lib.rs's `FoldingScheme` trait interface
- fit Nova+CycleFold into the `FoldingScheme` trait
- refactor `src/nova/*`

* chore: add serialization assets for testing

Now we include an `assets` folder with a serialized proof & vk for tests

* Add `examples` dir, with Nova's `FoldingScheme` example

* polishing

* expose poseidon_test_config outside tests

* feat: templating kzg working

* chore: add evm and revm

* feat: start evm file

* chore: add ark-poly-commit

* chore: move `commitment` to `folding-schemes`

* chore: update `.gitignore` to ignore generated contracts

* chore: update template with bn254 lib on it (avoids import), update for loop to account for whitespaces

* refactor: update template with no lib

* feat: add evm deploy code, compile and create kzg verifier

* chore: update `Cargo.toml` to have `folding-schemes` available with verifiers

* feat: start kzg prove and verify with sol

* chore: compute crs from kzg prover

* feat: evm kzg verification passing

* tmp

* change: Swap order of G2 coordinates within the template

* Update way to serialize proof with correct order

* chore: update `Cargo.toml`

* chore: add revm

* chore: add `save_solidity`

* refactor: verifiers in dedicated mod

* refactor: have dedicated `utils` module

* chore: expose modules

* chore: update verifier for kzg

* chore: rename templates

* fix: look for binary using also name of contract

* refactor: generate groth16 proof for sha256 pre-image, generate groth16 template with verifying key

* chore: template renaming

* fix: switch circuit for circuit that simply adds

* feat: generates test data on the fly

* feat: update to latest groth16 verifier

* refactor: rename folder, update `.gitignore`

* chore: update `Cargo.toml`

* chore: update templates extension to indicate that they are templates

* chore: rename templates, both files and structs

* fix: template inheritance working

* feat: template spdx and pragma statements

* feat: decider verifier compiles, update test for kzg10 and groth16 templates

* feat: parameterize which size of the crs should be stored on the contract

* chore: add comment on how the groth16 and kzg10 proofs will be linked together

* chore: cargo clippy run

* chore: cargo clippy tests

* chore: cargo fmt

* refactor: remove unused lifetime parameter

* chore: end merge

* chore: move examples to `folding-schemes` workspace

* get latest main changes

* fix: temp fix clippy warnings, will remove lints once not used in tests only

* fix: cargo clippy lint added on `code_size`

* fix: update path to test circuit and add step for installing solc

* chore: remove `save_solidity` steps

* fix: the borrowed expression implements the required traits

* chore: update `Cargo.toml`

* chore: remove extra `[patch.crates-io]`

* fix: update to patch at the workspace level and add comment explaining this

* refactor: correct `staticcall` with valid input/output sizes and change return syntax for pairing

* refactor: expose modules and remove `dead_code` calls

* chore: update `README.md`, add additional comments on `kzg10` template and update `groth16` template comments

* chore: be clearer on attributions on `kzg10`

---------

Co-authored-by: CPerezz <c.perezbaro@gmail.com>
Co-authored-by: arnaucube <root@arnaucube.com>
Pierre
2024-02-09 08:19:25 +01:00
committed by GitHub
parent 97e973a685
commit 63dbbfe1bc
67 changed files with 1208 additions and 53 deletions

View File

@@ -0,0 +1,48 @@
[package]
name = "folding-schemes"
version = "0.1.0"
edition = "2021"
[dependencies]
ark-ec = "^0.4.0"
ark-ff = "^0.4.0"
ark-poly = "^0.4.0"
ark-std = "^0.4.0"
ark-crypto-primitives = { version = "^0.4.0", default-features = false, features = ["r1cs", "sponge", "crh"] }
ark-poly-commit = "^0.4.0"
ark-relations = { version = "^0.4.0", default-features = false }
ark-r1cs-std = { version = "0.4.0", default-features = false } # this is patched at the workspace level
ark-snark = { version = "^0.4.0"}
ark-serialize = "^0.4.0"
ark-circom = { git = "https://github.com/gakonst/ark-circom.git" }
thiserror = "1.0"
rayon = "1.7.0"
num-bigint = "0.4"
color-eyre = "=0.6.2"
# tmp imports for espresso's sumcheck
espresso_subroutines = {git="https://github.com/EspressoSystems/hyperplonk", package="subroutines"}
[dev-dependencies]
ark-pallas = {version="0.4.0", features=["r1cs"]}
ark-vesta = {version="0.4.0", features=["r1cs"]}
ark-bn254 = "0.4.0"
ark-mnt4-298 = {version="0.4.0", features=["r1cs"]}
ark-mnt6-298 = {version="0.4.0", features=["r1cs"]}
ark-groth16 = { version = "^0.4.0" }
rand = "0.8.5"
tracing = { version = "0.1", default-features = false, features = [ "attributes" ] }
tracing-subscriber = { version = "0.2" }
[features]
default = ["parallel"]
parallel = [
"ark-std/parallel",
"ark-ff/parallel",
"ark-ec/parallel",
"ark-poly/parallel",
"ark-crypto-primitives/parallel",
"ark-r1cs-std/parallel",
]

View File

@@ -0,0 +1,172 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]
use ark_crypto_primitives::crh::{
sha256::{
constraints::{Sha256Gadget, UnitVar},
Sha256,
},
CRHScheme, CRHSchemeGadget,
};
use ark_ff::{BigInteger, PrimeField, ToConstraintField};
use ark_r1cs_std::{fields::fp::FpVar, ToBytesGadget, ToConstraintFieldGadget};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use core::marker::PhantomData;
use std::time::Instant;
use ark_pallas::{constraints::GVar, Fr, Projective};
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use folding_schemes::commitment::pedersen::Pedersen;
use folding_schemes::folding::nova::{get_r1cs, Nova, ProverParams, VerifierParams};
use folding_schemes::frontend::FCircuit;
use folding_schemes::transcript::poseidon::poseidon_test_config;
use folding_schemes::{Error, FoldingScheme};
/// This is the circuit that we want to fold; it implements the FCircuit trait.
/// The parameter z_i denotes the current state, and z_{i+1} denotes the next state which we get by
/// applying the step.
/// In this example we set z_i and z_{i+1} to be a single value, but the trait is made to support
/// arrays, so our state could be an array with different values.
#[derive(Clone, Copy, Debug)]
pub struct Sha256FCircuit<F: PrimeField> {
_f: PhantomData<F>,
}
impl<F: PrimeField> FCircuit<F> for Sha256FCircuit<F> {
type Params = ();
fn new(_params: Self::Params) -> Self {
Self { _f: PhantomData }
}
/// computes the next state z_{i+1} from the current state z_i
fn step_native(self, z_i: Vec<F>) -> Result<Vec<F>, Error> {
let out_bytes = Sha256::evaluate(&(), z_i[0].into_bigint().to_bytes_le()).unwrap();
let out: Vec<F> = out_bytes.to_field_elements().unwrap();
Ok(vec![out[0]])
}
/// generates the constraints for the step of F for the given z_i
fn generate_step_constraints(
self,
_cs: ConstraintSystemRef<F>,
z_i: Vec<FpVar<F>>,
) -> Result<Vec<FpVar<F>>, SynthesisError> {
let unit_var = UnitVar::default();
let out_bytes = Sha256Gadget::evaluate(&unit_var, &z_i[0].to_bytes()?)?;
let out = out_bytes.0.to_constraint_field()?;
Ok(vec![out[0].clone()])
}
}
/// cargo test --example simple
#[cfg(test)]
pub mod tests {
use super::*;
use ark_r1cs_std::alloc::AllocVar;
use ark_relations::r1cs::ConstraintSystem;
// test to check that the Sha256FCircuit computes the same values inside and outside the circuit
#[test]
fn test_sha256_f_circuit() {
let cs = ConstraintSystem::<Fr>::new_ref();
let circuit = Sha256FCircuit::<Fr>::new(());
let z_i = vec![Fr::from(1_u32)];
let z_i1 = circuit.step_native(z_i.clone()).unwrap();
let z_iVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z_i)).unwrap();
let computed_z_i1Var = circuit
.generate_step_constraints(cs.clone(), z_iVar.clone())
.unwrap();
assert_eq!(computed_z_i1Var.value().unwrap(), z_i1);
}
}
// This method computes the Prover & Verifier parameters for the example. For a real-world use case
// those parameters should be generated carefully (both the PoseidonConfig and the PedersenParams)
#[allow(clippy::type_complexity)]
fn nova_setup<FC: FCircuit<Fr>>(
F_circuit: FC,
) -> (
ProverParams<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>>,
VerifierParams<Projective, Projective2>,
) {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
// get the CM & CF_CM len
let (r1cs, cf_r1cs) =
get_r1cs::<Projective, GVar, Projective2, GVar2, FC>(&poseidon_config, F_circuit).unwrap();
let cm_len = r1cs.A.n_rows;
let cf_cm_len = cf_r1cs.A.n_rows;
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
let prover_params =
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
poseidon_config: poseidon_config.clone(),
cm_params: pedersen_params,
cf_cm_params: cf_pedersen_params,
};
let verifier_params = VerifierParams::<Projective, Projective2> {
poseidon_config: poseidon_config.clone(),
r1cs,
cf_r1cs,
};
(prover_params, verifier_params)
}
/// cargo run --release --example fold_sha256
fn main() {
let num_steps = 10;
let initial_state = vec![Fr::from(1_u32)];
let F_circuit = Sha256FCircuit::<Fr>::new(());
println!("Prepare Nova ProverParams & VerifierParams");
let (prover_params, verifier_params) = nova_setup::<Sha256FCircuit<Fr>>(F_circuit);
// The idea here is that eventually we could replace the next chunk that defines the
// `type NOVA = Nova<...>` with another folding scheme that fulfills the `FoldingScheme`
// trait, and the rest of our code would keep working without needing to be updated.
type NOVA = Nova<
Projective,
GVar,
Projective2,
GVar2,
Sha256FCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
>;
println!("Initialize FoldingScheme");
let mut folding_scheme = NOVA::init(&prover_params, F_circuit, initial_state.clone()).unwrap();
// compute a step of the IVC
for i in 0..num_steps {
let start = Instant::now();
folding_scheme.prove_step().unwrap();
println!("Nova::prove_step {}: {:?}", i, start.elapsed());
}
let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances();
println!("Run the Nova's IVC verifier");
NOVA::verify(
verifier_params,
initial_state,
folding_scheme.state(), // latest state
Fr::from(num_steps as u32),
running_instance,
incoming_instance,
cyclefold_instance,
)
.unwrap();
}
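
Since `FCircuit` works over `Vec<F>`, the folded state is not limited to a single field element. As a minimal sketch (hypothetical, not part of this commit; it reuses the trait and imports exactly as in the example above), a step function over a two-element state [a, b] -> [b, a + b] could look like:

// Hypothetical Fibonacci-like step over a two-element state, assuming the
// same `FCircuit` trait and imports as the Sha256FCircuit example above.
#[derive(Clone, Copy, Debug)]
pub struct FibonacciFCircuit<F: PrimeField> {
    _f: PhantomData<F>,
}
impl<F: PrimeField> FCircuit<F> for FibonacciFCircuit<F> {
    type Params = ();
    fn new(_params: Self::Params) -> Self {
        Self { _f: PhantomData }
    }
    // [a, b] -> [b, a + b], computed natively over F
    fn step_native(self, z_i: Vec<F>) -> Result<Vec<F>, Error> {
        Ok(vec![z_i[1], z_i[0] + z_i[1]])
    }
    // the same step expressed over constraint variables
    fn generate_step_constraints(
        self,
        _cs: ConstraintSystemRef<F>,
        z_i: Vec<FpVar<F>>,
    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
        Ok(vec![z_i[1].clone(), &z_i[0] + &z_i[1]])
    }
}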

View File

@@ -0,0 +1,128 @@
use ark_ec::CurveGroup;
use ark_std::log2;
use ark_std::{One, Zero};
use std::ops::Neg;
use crate::utils::vec::*;
use crate::Error;
pub mod r1cs;
use r1cs::R1CS;
/// CCS represents the Customizable Constraint Systems structure defined in
/// the [CCS paper](https://eprint.iacr.org/2023/552)
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CCS<C: CurveGroup> {
/// m: number of rows in M_i (such that M_i \in F^{m, n})
pub m: usize,
/// n = |z|, number of cols in M_i
pub n: usize,
/// l = |io|, size of public input/output
pub l: usize,
/// t = |M|, number of matrices
pub t: usize,
/// q = |c| = |S|, number of multisets
pub q: usize,
/// d: max degree in each variable
pub d: usize,
/// s = log(m), dimension of x
pub s: usize,
/// s_prime = log(n), dimension of y
pub s_prime: usize,
/// vector of matrices
pub M: Vec<SparseMatrix<C::ScalarField>>,
/// vector of multisets
pub S: Vec<Vec<usize>>,
/// vector of coefficients
pub c: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CCS<C> {
/// check that a CCS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[C::ScalarField]) -> Result<(), Error> {
let mut result = vec![C::ScalarField::zero(); self.m];
for i in 0..self.q {
// extract the needed M_j matrices out of S_i
let vec_M_j: Vec<&SparseMatrix<C::ScalarField>> =
self.S[i].iter().map(|j| &self.M[*j]).collect();
// complete the hadamard chain
let mut hadamard_result = vec![C::ScalarField::one(); self.m];
for M_j in vec_M_j.into_iter() {
hadamard_result = hadamard(&hadamard_result, &mat_vec_mul_sparse(M_j, z)?)?;
}
// multiply by the coefficient of this step
let c_M_j_z = vec_scalar_mul(&hadamard_result, &self.c[i]);
// add it to the final vector
result = vec_add(&result, &c_M_j_z)?;
}
// make sure the final vector is all zeroes
for e in result {
if !e.is_zero() {
return Err(Error::NotSatisfied);
}
}
Ok(())
}
}
impl<C: CurveGroup> CCS<C> {
pub fn from_r1cs(r1cs: R1CS<C::ScalarField>) -> Self {
let m = r1cs.A.n_rows;
let n = r1cs.A.n_cols;
CCS {
m,
n,
l: r1cs.l,
s: log2(m) as usize,
s_prime: log2(n) as usize,
t: 3,
q: 2,
d: 2,
S: vec![vec![0, 1], vec![2]],
c: vec![C::ScalarField::one(), C::ScalarField::one().neg()],
M: vec![r1cs.A, r1cs.B, r1cs.C],
}
}
pub fn to_r1cs(self) -> R1CS<C::ScalarField> {
R1CS::<C::ScalarField> {
l: self.l,
A: self.M[0].clone(),
B: self.M[1].clone(),
C: self.M[2].clone(),
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z};
use ark_ff::PrimeField;
use ark_pallas::Projective;
pub fn get_test_ccs<C: CurveGroup>() -> CCS<C> {
let r1cs = get_test_r1cs::<C::ScalarField>();
CCS::<C>::from_r1cs(r1cs)
}
pub fn get_test_z<F: PrimeField>(input: usize) -> Vec<F> {
r1cs_get_test_z(input)
}
/// Test that a basic CCS relation can be satisfied
#[test]
fn test_ccs_relation() {
let ccs = get_test_ccs::<Projective>();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
}
}
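
For clarity, the relation enforced by `check_relation` above can be written as (restating the code in the CCS paper's notation): \sum_{i=1}^{q} c_i \cdot \bigcirc_{j \in S_i} (M_j z) = 0, where \bigcirc denotes the chained Hadamard (entry-wise) product. With the parameters set by `from_r1cs` (S = [[0, 1], [2]], c = (1, -1)), this specializes to (Az) \circ (Bz) - Cz = 0, i.e. exactly the R1CS relation.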

View File

@@ -0,0 +1,168 @@
use ark_ff::PrimeField;
use crate::utils::vec::*;
use crate::Error;
use ark_relations::r1cs::ConstraintSystem;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct R1CS<F: PrimeField> {
pub l: usize, // io len
pub A: SparseMatrix<F>,
pub B: SparseMatrix<F>,
pub C: SparseMatrix<F>,
}
impl<F: PrimeField> R1CS<F> {
/// returns a tuple containing (w, x) (witness and public inputs respectively)
pub fn split_z(&self, z: &[F]) -> (Vec<F>, Vec<F>) {
(z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec())
}
/// check that a R1CS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[F]) -> Result<(), Error> {
let Az = mat_vec_mul_sparse(&self.A, z)?;
let Bz = mat_vec_mul_sparse(&self.B, z)?;
let Cz = mat_vec_mul_sparse(&self.C, z)?;
let AzBz = hadamard(&Az, &Bz)?;
if AzBz != Cz {
return Err(Error::NotSatisfied);
}
Ok(())
}
/// converts the R1CS instance into a RelaxedR1CS as described in
/// [Nova](https://eprint.iacr.org/2021/370.pdf) section 4.1.
pub fn relax(self) -> RelaxedR1CS<F> {
RelaxedR1CS::<F> {
l: self.l,
E: vec![F::zero(); self.A.n_rows],
A: self.A,
B: self.B,
C: self.C,
u: F::one(),
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct RelaxedR1CS<F: PrimeField> {
pub l: usize, // io len
pub A: SparseMatrix<F>,
pub B: SparseMatrix<F>,
pub C: SparseMatrix<F>,
pub u: F,
pub E: Vec<F>,
}
impl<F: PrimeField> RelaxedR1CS<F> {
/// check that a RelaxedR1CS structure is satisfied by a z vector. Only for testing.
pub fn check_relation(&self, z: &[F]) -> Result<(), Error> {
let Az = mat_vec_mul_sparse(&self.A, z)?;
let Bz = mat_vec_mul_sparse(&self.B, z)?;
let Cz = mat_vec_mul_sparse(&self.C, z)?;
let uCz = vec_scalar_mul(&Cz, &self.u);
let uCzE = vec_add(&uCz, &self.E)?;
let AzBz = hadamard(&Az, &Bz)?;
if AzBz != uCzE {
return Err(Error::NotSatisfied);
}
Ok(())
}
}
/// extracts the arkworks ConstraintSystem matrices into the crate::utils::vec::SparseMatrix
/// format, returning them as an R1CS struct.
pub fn extract_r1cs<F: PrimeField>(cs: &ConstraintSystem<F>) -> R1CS<F> {
let m = cs.to_matrices().unwrap();
let n_rows = cs.num_constraints;
let n_cols = cs.num_instance_variables + cs.num_witness_variables; // cs.num_instance_variables already counts the 1
let A = SparseMatrix::<F> {
n_rows,
n_cols,
coeffs: m.a,
};
let B = SparseMatrix::<F> {
n_rows,
n_cols,
coeffs: m.b,
};
let C = SparseMatrix::<F> {
n_rows,
n_cols,
coeffs: m.c,
};
R1CS::<F> {
l: cs.num_instance_variables - 1, // -1 to subtract the first '1'
A,
B,
C,
}
}
/// extracts the witness and the public inputs from arkworks ConstraintSystem.
pub fn extract_w_x<F: PrimeField>(cs: &ConstraintSystem<F>) -> (Vec<F>, Vec<F>) {
(
cs.witness_assignment.clone(),
// skip the first element which is '1'
cs.instance_assignment[1..].to_vec(),
)
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::utils::vec::tests::{to_F_matrix, to_F_vec};
use ark_ff::PrimeField;
use ark_pallas::Fr;
pub fn get_test_r1cs<F: PrimeField>() -> R1CS<F> {
// R1CS for: x^3 + x + 5 = y (example from article
// https://www.vitalik.ca/general/2016/12/10/qap.html )
let A = to_F_matrix::<F>(vec![
vec![0, 1, 0, 0, 0, 0],
vec![0, 0, 0, 1, 0, 0],
vec![0, 1, 0, 0, 1, 0],
vec![5, 0, 0, 0, 0, 1],
]);
let B = to_F_matrix::<F>(vec![
vec![0, 1, 0, 0, 0, 0],
vec![0, 1, 0, 0, 0, 0],
vec![1, 0, 0, 0, 0, 0],
vec![1, 0, 0, 0, 0, 0],
]);
let C = to_F_matrix::<F>(vec![
vec![0, 0, 0, 1, 0, 0],
vec![0, 0, 0, 0, 1, 0],
vec![0, 0, 0, 0, 0, 1],
vec![0, 0, 1, 0, 0, 0],
]);
R1CS::<F> { l: 1, A, B, C }
}
pub fn get_test_z<F: PrimeField>(input: usize) -> Vec<F> {
// z = (1, io, w)
to_F_vec(vec![
1,
input, // io
input * input * input + input + 5, // x^3 + x + 5
input * input, // x^2
input * input * input, // x^2 * x
input * input * input + input, // x^3 + x
])
}
#[test]
fn test_check_relation() {
let r1cs = get_test_r1cs::<Fr>();
let z = get_test_z(5);
r1cs.check_relation(&z).unwrap();
r1cs.relax().check_relation(&z).unwrap();
}
}
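
A minimal usage sketch for `extract_r1cs` / `extract_w_x` (hypothetical, assuming the arkworks `ConstraintSystem` API; the constraint allocation itself is elided):

use ark_pallas::Fr;
use ark_relations::r1cs::ConstraintSystem;
use ark_std::One;

fn extract_and_check() {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // ... allocate public inputs / witnesses and enforce constraints here ...
    cs.finalize();
    let cs = cs.into_inner().unwrap();
    // extract the matrices and the assignments
    let r1cs = extract_r1cs::<Fr>(&cs);
    let (w, x) = extract_w_x::<Fr>(&cs);
    // rebuild z = (1, io, w) and check that the extracted R1CS is satisfied
    let z = [vec![Fr::one()], x, w].concat();
    r1cs.check_relation(&z).unwrap();
}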

View File

@@ -0,0 +1,235 @@
/// Adaptation of the prover methods and structs from arkworks/poly-commit's KZG10 implementation
/// into the CommitmentProver trait.
///
/// The motivation for this is that we want to be able to use KZG / Pedersen for committing to
/// vectors interchangeably, while the arkworks KZG10 implementation keeps all the methods under
/// the same trait, which requires the Pairing trait even though the prover does not need access
/// to the Pairing but only to G1.
/// For our case, we want the folding schemes prover to be agnostic to pairings, since in the
/// non-Ethereum cases we may use non-pairing-friendly curves with Pedersen commitments, so the
/// trait & types that we use should not depend on the Pairing type for the prover. Therefore, we
/// separate the CommitmentSchemeProver from the setup and verify phases, so the prover can be
/// defined without depending on pairings.
use ark_ec::{pairing::Pairing, CurveGroup, VariableBaseMSM};
use ark_ff::PrimeField;
use ark_poly::{
univariate::{DenseOrSparsePolynomial, DensePolynomial},
DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
};
use ark_poly_commit::kzg10::{VerifierKey, KZG10};
use ark_std::rand::Rng;
use ark_std::{borrow::Cow, fmt::Debug};
use ark_std::{One, Zero};
use core::marker::PhantomData;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use super::CommitmentProver;
use crate::transcript::Transcript;
use crate::Error;
/// ProverKey defines a struct similar to ark_poly_commit::kzg10::Powers, but instead of
/// depending on the Pairing trait it depends on the CurveGroup trait.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct ProverKey<'a, C: CurveGroup> {
/// Group elements of the form `β^i G`, for different values of `i`.
pub powers_of_g: Cow<'a, [C::Affine]>,
}
pub struct KZGSetup<P: Pairing> {
_p: PhantomData<P>,
}
impl<'a, P> KZGSetup<P>
where
P: Pairing,
{
/// setup returns the tuple (ProverKey, VerifierKey). For real-world deployments the setup must
/// be computed in the most trustless way possible, usually through an MPC ceremony.
pub fn setup<R: Rng>(rng: &mut R, len: usize) -> (ProverKey<'a, P::G1>, VerifierKey<P>) {
let len = len.next_power_of_two();
let universal_params = KZG10::<P, DensePolynomial<P::ScalarField>>::setup(len, false, rng)
.expect("Setup failed");
let powers_of_g = universal_params.powers_of_g[..=len].to_vec();
let powers = ProverKey::<P::G1> {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
};
let vk = VerifierKey {
g: universal_params.powers_of_g[0],
gamma_g: universal_params.powers_of_gamma_g[&0],
h: universal_params.h,
beta_h: universal_params.beta_h,
prepared_h: universal_params.prepared_h.clone(),
prepared_beta_h: universal_params.prepared_beta_h.clone(),
};
(powers, vk)
}
}
/// KZGProver implements the CommitmentProver trait for the KZG commitment scheme.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct KZGProver<'a, C: CurveGroup> {
_a: PhantomData<&'a ()>,
_c: PhantomData<C>,
}
impl<'a, C> CommitmentProver<C> for KZGProver<'a, C>
where
C: CurveGroup,
{
type Params = ProverKey<'a, C>;
/// Proof is a tuple containing (evaluation, proof)
type Proof = (C::ScalarField, C);
/// commit implements the CommitmentProver commit interface, adapting the implementation from
/// https://github.com/arkworks-rs/poly-commit/tree/c724fa666e935bbba8db5a1421603bab542e15ab/poly-commit/src/kzg10/mod.rs#L178
/// with the main differences being the removal of the blinding factors and the removal of the
/// dependency on the Pairing trait.
fn commit(
params: &Self::Params,
v: &[C::ScalarField],
_blind: &C::ScalarField,
) -> Result<C, Error> {
if !_blind.is_zero() {
return Err(Error::NotSupportedYet("blinding factors".to_string()));
}
let polynomial = poly_from_vec(v.to_vec())?;
check_degree_is_too_large(polynomial.degree(), params.powers_of_g.len())?;
let (num_leading_zeros, plain_coeffs) =
skip_first_zero_coeffs_and_convert_to_bigints(&polynomial);
let commitment = <C as VariableBaseMSM>::msm_bigint(
&params.powers_of_g[num_leading_zeros..],
&plain_coeffs,
);
Ok(commitment)
}
/// prove implements the CommitmentProver prove interface, adapting the implementation from
/// https://github.com/arkworks-rs/poly-commit/tree/c724fa666e935bbba8db5a1421603bab542e15ab/poly-commit/src/kzg10/mod.rs#L307
/// with the main differences being the removal of the blinding factors and the removal of the
/// dependency on the Pairing trait.
fn prove(
params: &Self::Params,
transcript: &mut impl Transcript<C>,
cm: &C,
v: &[C::ScalarField],
_blind: &C::ScalarField,
) -> Result<Self::Proof, Error> {
if !_blind.is_zero() {
return Err(Error::NotSupportedYet("blinding factors".to_string()));
}
let polynomial = poly_from_vec(v.to_vec())?;
check_degree_is_too_large(polynomial.degree(), params.powers_of_g.len())?;
transcript.absorb_point(cm)?;
let challenge = transcript.get_challenge();
// Compute q(x) = (p(x) - p(z)) / (x-z). Observe that this quotient does not change with z
// because p(z) is the remainder term. We can therefore omit p(z) when computing the
// quotient.
let divisor = DensePolynomial::<C::ScalarField>::from_coefficients_vec(vec![
-challenge,
C::ScalarField::one(),
]);
let (witness_poly, remainder_poly) = DenseOrSparsePolynomial::from(&polynomial)
.divide_with_q_and_r(&DenseOrSparsePolynomial::from(&divisor))
// the panic inside `divide_with_q_and_r` should never be reached, since the divisor
// polynomial is constructed right above and is nonzero, so the `.unwrap` should not
// fail.
.unwrap();
let evaluation = remainder_poly[0];
check_degree_is_too_large(witness_poly.degree(), params.powers_of_g.len())?;
let (num_leading_zeros, witness_coeffs) =
skip_first_zero_coeffs_and_convert_to_bigints(&witness_poly);
let proof = <C as VariableBaseMSM>::msm_bigint(
&params.powers_of_g[num_leading_zeros..],
&witness_coeffs,
);
Ok((evaluation, proof))
}
}
/// returns the polynomial of degree < v.len().next_power_of_two() that interpolates the given
/// elements of v over the evaluation domain.
fn poly_from_vec<F: PrimeField>(v: Vec<F>) -> Result<DensePolynomial<F>, Error> {
let D = GeneralEvaluationDomain::<F>::new(v.len()).ok_or(Error::NewDomainFail)?;
Ok(Evaluations::from_vec_and_domain(v, D).interpolate())
}
fn check_degree_is_too_large(
degree: usize,
num_powers: usize,
) -> Result<(), ark_poly_commit::error::Error> {
let num_coefficients = degree + 1;
if num_coefficients > num_powers {
Err(ark_poly_commit::error::Error::TooManyCoefficients {
num_coefficients,
num_powers,
})
} else {
Ok(())
}
}
fn skip_first_zero_coeffs_and_convert_to_bigints<F: PrimeField, P: DenseUVPolynomial<F>>(
p: &P,
) -> (usize, Vec<F::BigInt>) {
let mut num_leading_zeros = 0;
while num_leading_zeros < p.coeffs().len() && p.coeffs()[num_leading_zeros].is_zero() {
num_leading_zeros += 1;
}
let coeffs = convert_to_bigints(&p.coeffs()[num_leading_zeros..]);
(num_leading_zeros, coeffs)
}
fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
ark_std::cfg_iter!(p)
.map(|s| s.into_bigint())
.collect::<Vec<_>>()
}
#[cfg(test)]
mod tests {
use ark_bn254::{Bn254, Fr, G1Projective as G1};
use ark_poly_commit::kzg10::{Commitment as KZG10Commitment, Proof as KZG10Proof, KZG10};
use ark_std::{test_rng, UniformRand};
use super::*;
use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};
#[test]
fn test_kzg_commitment_scheme() {
let rng = &mut test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let transcript_p = &mut PoseidonTranscript::<G1>::new(&poseidon_config);
let transcript_v = &mut PoseidonTranscript::<G1>::new(&poseidon_config);
let n = 10;
let (pk, vk): (ProverKey<G1>, VerifierKey<Bn254>) = KZGSetup::<Bn254>::setup(rng, n);
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(rng)).take(n).collect();
let cm = KZGProver::<G1>::commit(&pk, &v, &Fr::zero()).unwrap();
let (eval, proof) =
KZGProver::<G1>::prove(&pk, transcript_p, &cm, &v, &Fr::zero()).unwrap();
// verify the proof:
// get evaluation challenge
transcript_v.absorb_point(&cm).unwrap();
let challenge = transcript_v.get_challenge();
// verify the KZG proof using arkworks method
assert!(KZG10::<Bn254, DensePolynomial<Fr>>::check(
&vk,
&KZG10Commitment(cm.into_affine()),
challenge,
eval,
&KZG10Proof::<Bn254> {
w: proof.into_affine(),
random_v: None,
},
)
.unwrap());
}
}
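
For reference, the `KZG10::check` call in the test above verifies the standard KZG opening equation (restating arkworks' verifier, not something introduced by this commit): e(\pi, [\beta]_2 - [z]_2) = e(C - [v]_1, [1]_2), where C is the commitment, \pi the opening proof, z the transcript challenge and v = p(z) the claimed evaluation. The equation holds because \pi commits to the quotient q(x) = (p(x) - v) / (x - z) computed in `prove` above.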

View File

@@ -0,0 +1,127 @@
use ark_ec::CurveGroup;
use ark_std::fmt::Debug;
use crate::transcript::Transcript;
use crate::Error;
pub mod kzg;
pub mod pedersen;
/// CommitmentProver defines the vector commitment scheme prover trait.
pub trait CommitmentProver<C: CurveGroup>: Clone + Debug {
type Params: Clone + Debug;
type Proof: Clone + Debug;
fn commit(
params: &Self::Params,
v: &[C::ScalarField],
blind: &C::ScalarField,
) -> Result<C, Error>;
fn prove(
params: &Self::Params,
transcript: &mut impl Transcript<C>,
cm: &C,
v: &[C::ScalarField],
blind: &C::ScalarField,
) -> Result<Self::Proof, Error>;
}
#[cfg(test)]
mod tests {
use super::*;
use ark_bn254::{Bn254, Fr, G1Projective as G1};
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
use ark_poly::univariate::DensePolynomial;
use ark_poly_commit::kzg10::{
Commitment as KZG10Commitment, Proof as KZG10Proof, VerifierKey, KZG10,
};
use ark_std::Zero;
use ark_std::{test_rng, UniformRand};
use super::kzg::{KZGProver, KZGSetup, ProverKey};
use super::pedersen::Pedersen;
use crate::transcript::{
poseidon::{poseidon_test_config, PoseidonTranscript},
Transcript,
};
// Commits to the two vectors using the given CommitmentProver, then computes the random
// linear combination of the commitments and of the vectors, and returns it together with its
// opening proof.
fn commit_rlc_and_prove<C: CurveGroup, CP: CommitmentProver<C>>(
poseidon_config: &PoseidonConfig<C::ScalarField>,
params: &CP::Params,
r: C::ScalarField,
v_1: &[C::ScalarField],
v_2: &[C::ScalarField],
) -> Result<(C, CP::Proof), Error>
where
<C as ark_ec::Group>::ScalarField: Absorb,
{
let cm_1 = CP::commit(params, v_1, &C::ScalarField::zero())?;
let cm_2 = CP::commit(params, v_2, &C::ScalarField::zero())?;
// random linear combination of the commitment and the witness (vector v)
let cm_3 = cm_1 + cm_2.mul(r);
let v_3: Vec<C::ScalarField> = v_1.iter().zip(v_2).map(|(a, b)| *a + (r * b)).collect();
let transcript = &mut PoseidonTranscript::<C>::new(poseidon_config);
let proof = CP::prove(params, transcript, &cm_3, &v_3, &C::ScalarField::zero()).unwrap();
Ok((cm_3, proof))
}
#[test]
fn test_homomorphic_property_using_CommitmentProver_trait() {
let rng = &mut test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let n: usize = 100;
// set random vector for the test
let v_1: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(rng)).take(n).collect();
let v_2: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(rng)).take(n).collect();
// set a random challenge for the random linear combination
let r = Fr::rand(rng);
// setup params for Pedersen & KZG
let pedersen_params = Pedersen::<G1>::new_params(rng, n);
let (kzg_pk, kzg_vk): (ProverKey<G1>, VerifierKey<Bn254>) =
KZGSetup::<Bn254>::setup(rng, n);
// Pedersen commit the two vectors and return their random linear combination and proof
let (pedersen_cm, pedersen_proof) = commit_rlc_and_prove::<G1, Pedersen<G1>>(
&poseidon_config,
&pedersen_params,
r,
&v_1,
&v_2,
)
.unwrap();
// KZG commit the two vectors and return their random linear combination and proof
let (kzg_cm, kzg_proof) =
commit_rlc_and_prove::<G1, KZGProver<G1>>(&poseidon_config, &kzg_pk, r, &v_1, &v_2)
.unwrap();
// verify Pedersen
let transcript_v = &mut PoseidonTranscript::<G1>::new(&poseidon_config);
Pedersen::<G1>::verify(&pedersen_params, transcript_v, pedersen_cm, pedersen_proof)
.unwrap();
// verify KZG
let transcript_v = &mut PoseidonTranscript::<G1>::new(&poseidon_config);
transcript_v.absorb_point(&kzg_cm).unwrap();
let challenge = transcript_v.get_challenge();
// verify the KZG proof using arkworks method
assert!(KZG10::<Bn254, DensePolynomial<Fr>>::check(
&kzg_vk,
&KZG10Commitment(kzg_cm.into_affine()),
challenge,
kzg_proof.0, // eval
&KZG10Proof::<Bn254> {
w: kzg_proof.1.into_affine(), // proof
random_v: None,
},
)
.unwrap());
}
}
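
The test above relies on the additive homomorphism shared by Pedersen and KZG commitments (restating what the code exercises): if cm_1 commits to v_1 and cm_2 commits to v_2, then cm_1 + r·cm_2 commits to v_1 + r·v_2, so a single opening proof for the folded vector verifies against the folded commitment.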

View File

@@ -0,0 +1,229 @@
use ark_ec::CurveGroup;
use ark_ff::Field;
use ark_r1cs_std::{boolean::Boolean, groups::GroupOpsBounds, prelude::CurveVar};
use ark_relations::r1cs::SynthesisError;
use ark_std::{rand::Rng, UniformRand};
use core::marker::PhantomData;
use super::CommitmentProver;
use crate::transcript::Transcript;
use crate::utils::vec::{vec_add, vec_scalar_mul};
use crate::Error;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Proof<C: CurveGroup> {
pub R: C,
pub u: Vec<C::ScalarField>,
pub r_u: C::ScalarField,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Params<C: CurveGroup> {
pub h: C,
pub generators: Vec<C::Affine>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Pedersen<C: CurveGroup> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> Pedersen<C> {
pub fn new_params<R: Rng>(rng: &mut R, max: usize) -> Params<C> {
let generators: Vec<C::Affine> = std::iter::repeat_with(|| C::Affine::rand(rng))
.take(max.next_power_of_two())
.collect();
let params: Params<C> = Params::<C> {
h: C::rand(rng),
generators,
};
params
}
}
// implement the CommitmentProver trait for Pedersen
impl<C: CurveGroup> CommitmentProver<C> for Pedersen<C> {
type Params = Params<C>;
type Proof = Proof<C>;
fn commit(
params: &Self::Params,
v: &[C::ScalarField],
r: &C::ScalarField, // blinding factor
) -> Result<C, Error> {
if params.generators.len() < v.len() {
return Err(Error::PedersenParamsLen(params.generators.len(), v.len()));
}
// h⋅r + <g, v>
// use msm_unchecked because we already ensured at the if that lengths match
Ok(params.h.mul(r) + C::msm_unchecked(&params.generators[..v.len()], v))
}
fn prove(
params: &Params<C>,
transcript: &mut impl Transcript<C>,
cm: &C,
v: &[C::ScalarField],
r: &C::ScalarField, // blinding factor
) -> Result<Self::Proof, Error> {
if params.generators.len() < v.len() {
return Err(Error::PedersenParamsLen(params.generators.len(), v.len()));
}
transcript.absorb_point(cm)?;
let r1 = transcript.get_challenge();
let d = transcript.get_challenges(v.len());
// R = h⋅r_1 + <g, d>
// use msm_unchecked because we already ensured at the if that lengths match
let R: C = params.h.mul(r1) + C::msm_unchecked(&params.generators[..d.len()], &d);
transcript.absorb_point(&R)?;
let e = transcript.get_challenge();
// u = d + v⋅e
let u = vec_add(&vec_scalar_mul(v, &e), &d)?;
// r_u = e⋅r + r_1
let r_u = e * r + r1;
Ok(Self::Proof { R, u, r_u })
}
}
impl<C: CurveGroup> Pedersen<C> {
pub fn verify(
params: &Params<C>,
transcript: &mut impl Transcript<C>,
cm: C,
proof: Proof<C>,
) -> Result<(), Error> {
if params.generators.len() < proof.u.len() {
return Err(Error::PedersenParamsLen(
params.generators.len(),
proof.u.len(),
));
}
transcript.absorb_point(&cm)?;
transcript.get_challenge(); // r_1
transcript.get_challenges(proof.u.len()); // d
transcript.absorb_point(&proof.R)?;
let e = transcript.get_challenge();
// check that: R + cm⋅e == h⋅r_u + <g, u>
let lhs = proof.R + cm.mul(e);
// use msm_unchecked because we already ensured at the if that lengths match
let rhs = params.h.mul(proof.r_u)
+ C::msm_unchecked(&params.generators[..proof.u.len()], &proof.u);
if lhs != rhs {
return Err(Error::CommitmentVerificationFail);
}
Ok(())
}
}
pub type CF<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
pub struct PedersenGadget<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
{
_cf: PhantomData<CF<C>>,
_c: PhantomData<C>,
_gc: PhantomData<GC>,
}
impl<C, GC> PedersenGadget<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
pub fn commit(
h: GC,
g: Vec<GC>,
v: Vec<Vec<Boolean<CF<C>>>>,
r: Vec<Boolean<CF<C>>>,
) -> Result<GC, SynthesisError> {
let mut res = GC::zero();
res += h.scalar_mul_le(r.iter())?;
for (i, v_i) in v.iter().enumerate() {
res += g[i].scalar_mul_le(v_i.iter())?;
}
Ok(res)
}
}
#[cfg(test)]
mod tests {
use ark_ff::{BigInteger, PrimeField};
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, bits::boolean::Boolean, eq::EqGadget};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;
use super::*;
use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};
#[test]
fn test_pedersen_vector() {
let mut rng = ark_std::test_rng();
let n: usize = 10;
// setup params
let params = Pedersen::<Projective>::new_params(&mut rng, n);
let poseidon_config = poseidon_test_config::<Fr>();
// init Prover's transcript
let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
// init Verifier's transcript
let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(n)
.collect();
let r: Fr = Fr::rand(&mut rng);
let cm = Pedersen::<Projective>::commit(&params, &v, &r).unwrap();
let proof = Pedersen::<Projective>::prove(&params, &mut transcript_p, &cm, &v, &r).unwrap();
Pedersen::<Projective>::verify(&params, &mut transcript_v, cm, proof).unwrap();
}
#[test]
fn test_pedersen_circuit() {
let mut rng = ark_std::test_rng();
let n: usize = 10;
// setup params
let params = Pedersen::<Projective>::new_params(&mut rng, n);
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(n)
.collect();
let r: Fr = Fr::rand(&mut rng);
let cm = Pedersen::<Projective>::commit(&params, &v, &r).unwrap();
// circuit
let cs = ConstraintSystem::<Fq>::new_ref();
let v_bits: Vec<Vec<bool>> = v.iter().map(|val| val.into_bigint().to_bits_le()).collect();
let r_bits: Vec<bool> = r.into_bigint().to_bits_le();
// prepare inputs
let vVar: Vec<Vec<Boolean<Fq>>> = v_bits
.iter()
.map(|val_bits| {
Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(val_bits.clone())).unwrap()
})
.collect();
let rVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
let gVar = Vec::<GVar>::new_witness(cs.clone(), || Ok(params.generators)).unwrap();
let hVar = GVar::new_witness(cs.clone(), || Ok(params.h)).unwrap();
let expected_cmVar = GVar::new_witness(cs.clone(), || Ok(cm)).unwrap();
// use the gadget
let cmVar = PedersenGadget::<Projective, GVar>::commit(hVar, gVar, vVar, rVar).unwrap();
cmVar.enforce_equal(&expected_cmVar).unwrap();
}
}
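
The verification equation checked in `verify` above holds by expanding the prover's definitions (a restatement for clarity): R + cm·e = (h·r_1 + &lt;g, d&gt;) + e·(h·r + &lt;g, v&gt;) = h·(e·r + r_1) + &lt;g, d + e·v&gt; = h·r_u + &lt;g, u&gt;, which is exactly the right-hand side the verifier recomputes.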

View File

@@ -0,0 +1,5 @@
// used for the RO challenges.
// From [Srinath Setty](https://microsoft.com/en-us/research/people/srinath/): In Nova, soundness
// error ≤ 2/|S|, where S is the subset of the field F from which the challenges are drawn. In this
// case, we keep the size of S close to 2^128.
pub const N_BITS_RO: usize = 128;

View File

@@ -0,0 +1,10 @@
/// Circuits and gadgets shared across the different folding schemes.
use ark_ec::CurveGroup;
use ark_ff::Field;
pub mod nonnative;
pub mod sum_check;
pub mod utils;
// CF represents the constraint field
pub type CF<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;

View File

@@ -0,0 +1,131 @@
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::PrimeField;
use ark_r1cs_std::fields::nonnative::{params::OptimizationType, AllocatedNonNativeFieldVar};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::{fp::FpVar, nonnative::NonNativeFieldVar},
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{One, Zero};
use core::borrow::Borrow;
/// NonNativeAffineVar represents an elliptic curve point in affine representation in the
/// non-native field, over the constraint field. It is not intended for performing operations,
/// but just for containing the affine coordinates in order to perform hash operations over the
/// point.
#[derive(Debug, Clone)]
pub struct NonNativeAffineVar<F: PrimeField> {
pub x: Vec<FpVar<F>>,
pub y: Vec<FpVar<F>>,
}
impl<C> AllocVar<C, C::ScalarField> for NonNativeAffineVar<C::ScalarField>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
fn new_variable<T: Borrow<C>>(
cs: impl Into<Namespace<C::ScalarField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let affine = val.borrow().into_affine();
let zero_point = (&C::BaseField::zero(), &C::BaseField::one());
let xy = affine.xy().unwrap_or(zero_point);
let x = NonNativeFieldVar::<C::BaseField, C::ScalarField>::new_variable(
cs.clone(),
|| Ok(xy.0),
mode,
)?
.to_constraint_field()?;
let y = NonNativeFieldVar::<C::BaseField, C::ScalarField>::new_variable(
cs.clone(),
|| Ok(xy.1),
mode,
)?
.to_constraint_field()?;
Ok(Self { x, y })
})
}
}
/// Wrapper on top of [`point_to_nonnative_limbs_custom_opt`] which always uses
/// [`OptimizationType::Weight`].
#[allow(clippy::type_complexity)]
pub fn point_to_nonnative_limbs<C: CurveGroup>(
p: C,
) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
point_to_nonnative_limbs_custom_opt(p, OptimizationType::Weight)
}
/// Used to compute (outside the circuit) the limbs representation of a point that matches the
/// one used in-circuit; in particular, this method allows specifying which [`OptimizationType`]
/// to use.
#[allow(clippy::type_complexity)]
pub fn point_to_nonnative_limbs_custom_opt<C: CurveGroup>(
p: C,
optimization_type: OptimizationType,
) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
let affine = p.into_affine();
if affine.is_zero() {
let x =
AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
&C::BaseField::zero(),
optimization_type,
)?;
let y =
AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
&C::BaseField::one(),
optimization_type,
)?;
return Ok((x, y));
}
let (x, y) = affine.xy().unwrap();
let x = AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
x,
optimization_type,
)?;
let y = AllocatedNonNativeFieldVar::<C::BaseField, C::ScalarField>::get_limbs_representations(
y,
optimization_type,
)?;
Ok((x, y))
}
#[cfg(test)]
mod tests {
use super::*;
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{UniformRand, Zero};
#[test]
fn test_alloc_nonnativeaffinevar() {
let cs = ConstraintSystem::<Fr>::new_ref();
// dealing with the 'zero' point should not panic when doing the unwrap
let p = Projective::zero();
NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(p)).unwrap();
// check that point_to_nonnative_limbs returns the expected values
let mut rng = ark_std::test_rng();
let p = Projective::rand(&mut rng);
let pVar = NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(p)).unwrap();
let (x, y) = point_to_nonnative_limbs(p).unwrap();
assert_eq!(pVar.x.value().unwrap(), x);
assert_eq!(pVar.y.value().unwrap(), y);
}
}

View File

@@ -0,0 +1,273 @@
use crate::utils::espresso::sum_check::SumCheck;
use crate::utils::virtual_polynomial::VPAuxInfo;
use crate::{
transcript::{
poseidon::{PoseidonTranscript, PoseidonTranscriptVar},
TranscriptVar,
},
utils::sum_check::{structs::IOPProof, IOPSumCheck},
};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
/// Heavily inspired by testudo: https://github.com/cryptonetlab/testudo/tree/master
/// Some changes:
/// - Typings to better stick to ark_poly's API
/// - Uses `folding-schemes`' own `TranscriptVar` trait and `PoseidonTranscriptVar` struct
/// - API made closer to gadgets found in `folding-schemes`
use ark_ff::PrimeField;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
eq::EqGadget,
fields::fp::FpVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use std::{borrow::Borrow, marker::PhantomData};
#[derive(Clone, Debug)]
pub struct DensePolynomialVar<F: PrimeField> {
pub coeffs: Vec<FpVar<F>>,
}
impl<F: PrimeField> AllocVar<DensePolynomial<F>, F> for DensePolynomialVar<F> {
fn new_variable<T: Borrow<DensePolynomial<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|c| {
let cs = cs.into();
let cp: &DensePolynomial<F> = c.borrow();
let mut coeffs_var = Vec::<FpVar<F>>::with_capacity(cp.coeffs.len());
for coeff in cp.coeffs.iter() {
let coeff_var = FpVar::<F>::new_variable(cs.clone(), || Ok(coeff), mode)?;
coeffs_var.push(coeff_var);
}
Ok(Self { coeffs: coeffs_var })
})
}
}
impl<F: PrimeField> DensePolynomialVar<F> {
pub fn eval_at_zero(&self) -> FpVar<F> {
self.coeffs[0].clone()
}
pub fn eval_at_one(&self) -> FpVar<F> {
let mut res = self.coeffs[0].clone();
for i in 1..self.coeffs.len() {
res = &res + &self.coeffs[i];
}
res
}
pub fn evaluate(&self, r: &FpVar<F>) -> FpVar<F> {
let mut eval = self.coeffs[0].clone();
let mut power = r.clone();
for i in 1..self.coeffs.len() {
eval += &power * &self.coeffs[i];
power *= r;
}
eval
}
}
#[derive(Clone, Debug)]
pub struct IOPProofVar<C: CurveGroup> {
// We have to be generic over a CurveGroup because instantiating an IOPProofVar will call IOPSumCheck, which requires a CurveGroup
pub proofs: Vec<DensePolynomialVar<C::ScalarField>>,
pub claim: FpVar<C::ScalarField>,
}
impl<C: CurveGroup> AllocVar<IOPProof<C::ScalarField>, C::ScalarField> for IOPProofVar<C>
where
<C as Group>::ScalarField: Absorb,
{
fn new_variable<T: Borrow<IOPProof<C::ScalarField>>>(
cs: impl Into<Namespace<C::ScalarField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|c| {
let cs = cs.into();
let cp: &IOPProof<C::ScalarField> = c.borrow();
let claim = IOPSumCheck::<C, PoseidonTranscript<C>>::extract_sum(cp);
let claim = FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(claim), mode)?;
let mut proofs =
Vec::<DensePolynomialVar<C::ScalarField>>::with_capacity(cp.proofs.len());
for proof in cp.proofs.iter() {
let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs);
let proof = DensePolynomialVar::<C::ScalarField>::new_variable(
cs.clone(),
|| Ok(poly),
mode,
)?;
proofs.push(proof);
}
Ok(Self { proofs, claim })
})
}
}
#[derive(Clone, Debug)]
pub struct VPAuxInfoVar<F: PrimeField> {
pub num_variables: FpVar<F>,
pub max_degree: FpVar<F>,
}
impl<F: PrimeField> AllocVar<VPAuxInfo<F>, F> for VPAuxInfoVar<F> {
fn new_variable<T: Borrow<VPAuxInfo<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|c| {
let cs = cs.into();
let cp: &VPAuxInfo<F> = c.borrow();
let num_variables = FpVar::<F>::new_variable(
cs.clone(),
|| Ok(F::from(cp.num_variables as u64)),
mode,
)?;
let max_degree =
FpVar::<F>::new_variable(cs.clone(), || Ok(F::from(cp.max_degree as u64)), mode)?;
Ok(Self {
num_variables,
max_degree,
})
})
}
}
#[derive(Debug, Clone)]
pub struct SumCheckVerifierGadget<C: CurveGroup> {
_f: PhantomData<C>,
}
impl<C: CurveGroup> SumCheckVerifierGadget<C> {
#[allow(clippy::type_complexity)]
pub fn verify(
iop_proof_var: &IOPProofVar<C>,
poly_aux_info_var: &VPAuxInfoVar<C::ScalarField>,
transcript_var: &mut PoseidonTranscriptVar<C::ScalarField>,
) -> Result<(Vec<FpVar<C::ScalarField>>, Vec<FpVar<C::ScalarField>>), SynthesisError> {
let mut e_vars = vec![iop_proof_var.claim.clone()];
let mut r_vars: Vec<FpVar<C::ScalarField>> = Vec::new();
transcript_var.absorb(poly_aux_info_var.num_variables.clone())?;
transcript_var.absorb(poly_aux_info_var.max_degree.clone())?;
for poly_var in iop_proof_var.proofs.iter() {
let res = poly_var.eval_at_one() + poly_var.eval_at_zero();
let e_var = e_vars.last().ok_or(SynthesisError::Unsatisfiable)?;
res.enforce_equal(e_var)?;
transcript_var.absorb_vec(&poly_var.coeffs)?;
let r_i_var = transcript_var.get_challenge()?;
e_vars.push(poly_var.evaluate(&r_i_var));
r_vars.push(r_i_var);
}
Ok((e_vars, r_vars))
}
}
#[cfg(test)]
mod tests {
use crate::{
folding::circuits::sum_check::{IOPProofVar, VPAuxInfoVar},
transcript::{
poseidon::{poseidon_test_config, PoseidonTranscript, PoseidonTranscriptVar},
Transcript, TranscriptVar,
},
utils::{
sum_check::{structs::IOPProof, IOPSumCheck, SumCheck},
virtual_polynomial::VirtualPolynomial,
},
};
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
use ark_ec::CurveGroup;
use ark_ff::Field;
use ark_pallas::{Fr, Projective};
use ark_poly::{
univariate::DensePolynomial, DenseMultilinearExtension, DenseUVPolynomial,
MultilinearExtension, Polynomial,
};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use std::sync::Arc;
use super::SumCheckVerifierGadget;
pub type TestSumCheckProof<F> = (VirtualPolynomial<F>, PoseidonConfig<F>, IOPProof<F>);
/// Primarily used for testing the sumcheck gadget.
/// Returns a random virtual polynomial, the poseidon config used, and the associated sumcheck proof
pub fn get_test_sumcheck_proof<C: CurveGroup>(
num_vars: usize,
) -> TestSumCheckProof<C::ScalarField>
where
<C as ark_ec::Group>::ScalarField: Absorb,
{
let mut rng = ark_std::test_rng();
let poseidon_config: PoseidonConfig<C::ScalarField> =
poseidon_test_config::<C::ScalarField>();
let mut poseidon_transcript_prove = PoseidonTranscript::<C>::new(&poseidon_config);
let poly_mle = DenseMultilinearExtension::rand(num_vars, &mut rng);
let virtual_poly =
VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), C::ScalarField::ONE);
let sum_check: IOPProof<C::ScalarField> = IOPSumCheck::<C, PoseidonTranscript<C>>::prove(
&virtual_poly,
&mut poseidon_transcript_prove,
)
.unwrap();
(virtual_poly, poseidon_config, sum_check)
}
#[test]
fn test_sum_check_circuit() {
for num_vars in 1..15 {
let cs = ConstraintSystem::<Fr>::new_ref();
let (virtual_poly, poseidon_config, sum_check) =
get_test_sumcheck_proof::<Projective>(num_vars);
let mut poseidon_var: PoseidonTranscriptVar<Fr> =
PoseidonTranscriptVar::new(cs.clone(), &poseidon_config);
let iop_proof_var =
IOPProofVar::<Projective>::new_witness(cs.clone(), || Ok(&sum_check)).unwrap();
let poly_aux_info_var =
VPAuxInfoVar::<Fr>::new_witness(cs.clone(), || Ok(virtual_poly.aux_info)).unwrap();
let res = SumCheckVerifierGadget::<Projective>::verify(
&iop_proof_var,
&poly_aux_info_var,
&mut poseidon_var,
);
assert!(res.is_ok());
let (circuit_evals, r_challenges) = res.unwrap();
// 1. assert claim from circuit is equal to the one from the sum-check
let claim: Fr =
IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::extract_sum(&sum_check);
assert_eq!(circuit_evals[0].value().unwrap(), claim);
// 2. assert that all in-circuit evaluations are equal to the ones from the sum-check
for ((proof, point), circuit_eval) in sum_check
.proofs
.iter()
.zip(sum_check.point.iter())
.zip(circuit_evals.iter().skip(1))
// we skip the first one since it's the above checked claim
{
let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs);
let eval = poly.evaluate(point);
assert_eq!(eval, circuit_eval.value().unwrap());
}
// 3. assert that all challenges are equal to the ones from the sum-check
for (point, r_challenge) in sum_check.point.iter().zip(r_challenges.iter()) {
assert_eq!(*point, r_challenge.value().unwrap());
}
assert!(cs.is_satisfied().unwrap());
}
}
}
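
For clarity, the round checks enforced in-circuit by `SumCheckVerifierGadget::verify` above are the standard sum-check consistency conditions (restated from the code): with e_0 set to the claimed sum extracted from the proof, for each round polynomial p_i the gadget enforces p_i(0) + p_i(1) = e_{i-1}, re-derives the challenge r_i from the transcript, and sets e_i = p_i(r_i).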

View File

@@ -0,0 +1,75 @@
use ark_ff::PrimeField;
use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
use ark_relations::r1cs::SynthesisError;
use std::marker::PhantomData;
/// EqEval is a gadget for computing $\tilde{eq}(a, b) = \prod_{i=1}^{l}(a_i \cdot b_i + (1 - a_i)(1 - b_i))$
/// :warning: This is not the ark_r1cs_std::eq::EqGadget
pub struct EqEvalGadget<F: PrimeField> {
_f: PhantomData<F>,
}
impl<F: PrimeField> EqEvalGadget<F> {
/// Gadget to evaluate eq polynomial.
/// Follows the implementation of `eq_eval` found in this crate.
pub fn eq_eval(x: Vec<FpVar<F>>, y: Vec<FpVar<F>>) -> Result<FpVar<F>, SynthesisError> {
if x.len() != y.len() {
return Err(SynthesisError::Unsatisfiable);
}
if x.is_empty() || y.is_empty() {
return Err(SynthesisError::AssignmentMissing);
}
let mut e = FpVar::<F>::one();
for (xi, yi) in x.iter().zip(y.iter()) {
let xi_yi = xi * yi;
e *= xi_yi.clone() + xi_yi - xi - yi + F::one();
}
Ok(e)
}
}
#[cfg(test)]
mod tests {
use crate::utils::virtual_polynomial::eq_eval;
use super::EqEvalGadget;
use ark_ff::Field;
use ark_pallas::Fr;
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
#[test]
pub fn test_eq_eval_gadget() {
let mut rng = test_rng();
let cs = ConstraintSystem::<Fr>::new_ref();
for i in 1..20 {
let x_vec: Vec<Fr> = (0..i).map(|_| Fr::rand(&mut rng)).collect();
let y_vec: Vec<Fr> = (0..i).map(|_| Fr::rand(&mut rng)).collect();
let x: Vec<FpVar<Fr>> = x_vec
.iter()
.map(|x| FpVar::<Fr>::new_witness(cs.clone(), || Ok(x)).unwrap())
.collect();
let y: Vec<FpVar<Fr>> = y_vec
.iter()
.map(|y| FpVar::<Fr>::new_witness(cs.clone(), || Ok(y)).unwrap())
.collect();
let expected_eq_eval = eq_eval::<Fr>(&x_vec, &y_vec).unwrap();
let gadget_eq_eval: FpVar<Fr> = EqEvalGadget::<Fr>::eq_eval(x, y).unwrap();
assert_eq!(expected_eq_eval, gadget_eq_eval.value().unwrap());
}
let x: Vec<FpVar<Fr>> = vec![];
let y: Vec<FpVar<Fr>> = vec![];
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
assert!(gadget_eq_eval.is_err());
let x: Vec<FpVar<Fr>> = vec![];
let y: Vec<FpVar<Fr>> =
vec![FpVar::<Fr>::new_witness(cs.clone(), || Ok(&Fr::ONE)).unwrap()];
let gadget_eq_eval = EqEvalGadget::<Fr>::eq_eval(x, y);
assert!(gadget_eq_eval.is_err());
}
}
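
The accumulator in `eq_eval` above uses the per-coordinate identity (restating the code): x_i·y_i + (1 - x_i)(1 - y_i) = 2·x_i·y_i - x_i - y_i + 1, which is why the loop multiplies e by xi_yi + xi_yi - xi - yi + 1.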

View File

@@ -0,0 +1,231 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_std::One;
use ark_std::Zero;
use std::ops::Add;
use std::sync::Arc;
use ark_std::{rand::Rng, UniformRand};
use super::utils::compute_sum_Mz;
use crate::ccs::CCS;
use crate::commitment::{
pedersen::{Params as PedersenParams, Pedersen},
CommitmentProver,
};
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::matrix_to_mle;
use crate::utils::mle::vec_to_mle;
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::Error;
/// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment.
#[derive(Debug, Clone)]
pub struct Witness<F: PrimeField> {
pub w: Vec<F>,
pub r_w: F, // randomness used in the Pedersen commitment of w
}
/// Committed CCS instance
#[derive(Debug, Clone)]
pub struct CCCS<C: CurveGroup> {
// Commitment to witness
pub C: C,
// Public input/output
pub x: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CCS<C> {
pub fn to_cccs<R: Rng>(
&self,
rng: &mut R,
pedersen_params: &PedersenParams<C>,
z: &[C::ScalarField],
) -> Result<(CCCS<C>, Witness<C::ScalarField>), Error> {
let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
let r_w = C::ScalarField::rand(rng);
let C = Pedersen::<C>::commit(pedersen_params, &w, &r_w)?;
Ok((
CCCS::<C> {
C,
x: z[1..(1 + self.l)].to_vec(),
},
Witness::<C::ScalarField> { w, r_w },
))
}
/// Computes q(x) = \sum_{i=1}^{q} c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_q(&self, z: &Vec<C::ScalarField>) -> VirtualPolynomial<C::ScalarField> {
let z_mle = vec_to_mle(self.s_prime, z);
let mut q = VirtualPolynomial::<C::ScalarField>::new(self.s);
for i in 0..self.q {
let mut prod: VirtualPolynomial<C::ScalarField> =
VirtualPolynomial::<C::ScalarField>::new(self.s);
for j in self.S[i].clone() {
let M_j = matrix_to_mle(self.M[j].clone());
let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.s_prime);
// Fold this sum into the running product
if prod.products.is_empty() {
// If this is the first time we are adding something to this virtual polynomial, we need to
// explicitly add the products using add_mle_list()
// XXX is this true? improve API
prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one())
.unwrap();
} else {
prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one())
.unwrap();
}
}
// Multiply the product by the coefficient c_i
prod.scalar_mul(&self.c[i]);
// Add it to the running sum
q = q.add(&prod);
}
q
}
/// Computes Q(x) = eq(beta, x) * q(x)
/// = eq(beta, x) * \sum_{i=1}^{q} c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y) )
/// polynomial over x
pub fn compute_Q(
&self,
z: &Vec<C::ScalarField>,
beta: &[C::ScalarField],
) -> VirtualPolynomial<C::ScalarField> {
let q = self.compute_q(z);
q.build_f_hat(beta).unwrap()
}
}
impl<C: CurveGroup> CCCS<C> {
/// Perform the check of the CCCS instance described at section 4.1
pub fn check_relation(
&self,
pedersen_params: &PedersenParams<C>,
ccs: &CCS<C>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen
// opening, but checking that the commitment comes from committing to the witness.
if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
return Err(Error::NotSatisfied);
}
// check CCCS relation
let z: Vec<C::ScalarField> =
[vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();
// A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube
let q_x = ccs.compute_q(&z);
for x in BooleanHypercube::new(ccs.s) {
if !q_x.evaluate(&x).unwrap().is_zero() {
return Err(Error::NotSatisfied);
}
}
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use ark_std::test_rng;
use ark_std::UniformRand;
use ark_pallas::{Fr, Projective};
/// Do some sanity checks on q(x). It's a multivariate polynomial that should evaluate to zero
/// inside the hypercube, but to nonzero outside the hypercube.
#[test]
fn test_compute_q() {
let mut rng = test_rng();
let ccs = get_test_ccs::<Projective>();
let z = get_test_z(3);
let q = ccs.compute_q(&z);
// Evaluate inside the hypercube
for x in BooleanHypercube::new(ccs.s) {
assert_eq!(Fr::zero(), q.evaluate(&x).unwrap());
}
// Evaluate outside the hypercube
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap());
}
/// Perform some sanity checks on Q(x).
#[test]
fn test_compute_Q() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Compute Q(x) = eq(beta, x) * q(x).
let Q = ccs.compute_Q(&z, &beta);
// Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y)
// which interpolates the multivariate polynomial q(x) inside the hypercube.
//
// Observe that summing Q(x) inside the hypercube, directly computes G(\beta).
//
// Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the
// hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes
// inside the hypercube, this makes it the zero polynomial.
//
// Hence, evaluating G(x) at a random beta should give zero.
// Now sum Q(x) evaluations in the hypercube and expect it to be 0
let r = BooleanHypercube::new(ccs.s)
.map(|x| Q.evaluate(&x).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
assert_eq!(r, Fr::zero());
}
/// The polynomial G(x) (see above) interpolates q(x) inside the hypercube.
/// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point.
/// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside
#[test]
fn test_Q_against_q() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
// Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
// should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
let q = ccs.compute_q(&z);
for d in BooleanHypercube::new(ccs.s) {
let Q_at_d = ccs.compute_Q(&z, &d);
// Get G(d) by summing Q_d(x) over the hypercube
let G_at_d = BooleanHypercube::new(ccs.s)
.map(|x| Q_at_d.evaluate(&x).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
assert_eq!(G_at_d, q.evaluate(&d).unwrap());
}
// Now test that they should disagree outside of the hypercube
let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let Q_at_r = ccs.compute_Q(&z, &r);
// Get G(r) by summing Q_r(x) over the hypercube
let G_at_r = BooleanHypercube::new(ccs.s)
.map(|x| Q_at_r.evaluate(&x).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
assert_ne!(G_at_r, q.evaluate(&r).unwrap());
}
}

@@ -0,0 +1,318 @@
// hypernova nimfs verifier circuit
// see section 5 in https://eprint.iacr.org/2023/573.pdf
use crate::{ccs::CCS, folding::circuits::utils::EqEvalGadget};
use ark_ec::CurveGroup;
use ark_r1cs_std::{
alloc::AllocVar,
fields::{fp::FpVar, FieldVar},
ToBitsGadget,
};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::Zero;
use std::marker::PhantomData;
/// Gadget to compute $\sum_{j \in [t]} \gamma^{j} \cdot e_1 \cdot \sigma_j + \gamma^{t+1} \cdot e_2 \cdot \sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$.
/// This is the sum computed by the verifier and laid out in section 5, step 5 of "A multi-folding scheme for CCS".
pub struct ComputeCFromSigmasAndThetasGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> ComputeCFromSigmasAndThetasGadget<C> {
/// Computes the sum $\sum_{k=0}^{n-1} \gamma^{j+k} \cdot \text{eq\_eval} \cdot \sigma_{k}$, where $n$ is the length of the `sigmas` vector
/// It corresponds to the first term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
///
/// # Arguments
/// - `sigmas`: vector of $\sigma_j$ values
/// - `eq_eval`: the value of $\tilde{eq}(x_j, x^{\prime})$
/// - `gamma`: value $\gamma$
/// - `j`: the power at which we start to compute $\gamma^{j}$. This is needed in the context of multifolding.
///
/// # Notes
/// In the context of multifolding, `j` corresponds to `ccs.t` in `compute_c_from_sigmas_and_thetas`
fn sum_muls_gamma_pows_eq_sigma(
gamma: FpVar<C::ScalarField>,
eq_eval: FpVar<C::ScalarField>,
sigmas: Vec<FpVar<C::ScalarField>>,
j: FpVar<C::ScalarField>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut result = FpVar::<C::ScalarField>::zero();
let mut gamma_pow = gamma.pow_le(&j.to_bits_le()?)?;
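// pow_le takes the little-endian bit decomposition of the exponent, so gamma_pow starts at
// gamma^j and is then multiplied by gamma once per sigma, walking through consecutive powers.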
for sigma in sigmas {
result += gamma_pow.clone() * eq_eval.clone() * sigma;
gamma_pow *= gamma.clone();
}
Ok(result)
}
/// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$
///
/// # Arguments
/// - `c_i`: vector of $c_i$ values
/// - `thetas`: vector of pre-processed $\theta_j$ values corresponding to a particular `ccs.S[i]`
///
/// # Notes
/// This is a part of the second term of the sum that $\mathcal{V}$ has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
/// The first term is computed by `SumMulsGammaPowsEqSigmaGadget::sum_muls_gamma_pows_eq_sigma`.
/// This is a dot product between a vector of $c_i$ values and a vector of pre-processed $\theta_j$ values, where $j$ ranges over $S_i$.
/// Hence, this requires some pre-processing of the $\theta_j$ values, before running this gadget.
fn sum_ci_mul_prod_thetaj(
c_i: Vec<FpVar<C::ScalarField>>,
thetas: Vec<Vec<FpVar<C::ScalarField>>>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut result = FpVar::<C::ScalarField>::zero();
for (i, c_i) in c_i.iter().enumerate() {
let prod = &thetas[i].iter().fold(FpVar::one(), |acc, e| acc * e);
result += c_i * prod;
}
Ok(result)
}
/// Computes the sum that the verifier has to compute at section 5, step 5 of "A multi-folding scheme for CCS".
///
/// # Arguments
/// - `cs`: constraint system
/// - `ccs`: the CCS instance
/// - `vec_sigmas`: vector of $\sigma_j$ values
/// - `vec_thetas`: vector of $\theta_j$ values
/// - `gamma`: value $\gamma$
/// - `beta`: vector of $\beta_j$ values
/// - `vec_r_x`: vector of $r_{x_j}$ values
/// - `vec_r_x_prime`: vector of $r_{x_j}^{\prime}$ values
///
/// # Notes
/// Arguments to this function are *almost* the same as the arguments to `compute_c_from_sigmas_and_thetas` in `utils.rs`.
#[allow(clippy::too_many_arguments)]
pub fn compute_c_from_sigmas_and_thetas(
cs: ConstraintSystemRef<C::ScalarField>,
ccs: &CCS<C>,
vec_sigmas: Vec<Vec<FpVar<C::ScalarField>>>,
vec_thetas: Vec<Vec<FpVar<C::ScalarField>>>,
gamma: FpVar<C::ScalarField>,
beta: Vec<FpVar<C::ScalarField>>,
vec_r_x: Vec<Vec<FpVar<C::ScalarField>>>,
vec_r_x_prime: Vec<FpVar<C::ScalarField>>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
let mut c =
FpVar::<C::ScalarField>::new_witness(cs.clone(), || Ok(C::ScalarField::zero()))?;
let t = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(ccs.t as u64))
})?;
let mut e_lcccs = Vec::new();
for r_x in vec_r_x.iter() {
let e_1 = EqEvalGadget::eq_eval(r_x.to_vec(), vec_r_x_prime.to_vec())?;
e_lcccs.push(e_1);
}
for (i, sigmas) in vec_sigmas.iter().enumerate() {
let i_var = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(i as u64))
})?;
let pow = i_var * t.clone();
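// Each LCCCS instance contributes t sigmas, so the i-th instance starts at power gamma^{i*t},
// mirroring the `(i * ccs.t)` offset used in the native `compute_c_from_sigmas_and_thetas`.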
c += Self::sum_muls_gamma_pows_eq_sigma(
gamma.clone(),
e_lcccs[i].clone(),
sigmas.to_vec(),
pow,
)?;
}
let mu = FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(vec_sigmas.len() as u64))
})?;
let e_2 = EqEvalGadget::eq_eval(beta, vec_r_x_prime)?;
for (k, thetas) in vec_thetas.iter().enumerate() {
// Prepare the thetas; this is the only step that differs from the native `compute_c_from_sigmas_and_thetas`.
let mut prepared_thetas = Vec::new();
for i in 0..ccs.q {
let prepared: Vec<FpVar<C::ScalarField>> =
ccs.S[i].iter().map(|j| thetas[*j].clone()).collect();
prepared_thetas.push(prepared.to_vec());
}
let c_i = Vec::<FpVar<C::ScalarField>>::new_witness(cs.clone(), || Ok(ccs.c.clone()))
.unwrap();
let lhs = Self::sum_ci_mul_prod_thetaj(c_i.clone(), prepared_thetas.clone())?;
// compute gamma^{mu * t + k}
let pow = mu.clone() * t.clone()
+ FpVar::<C::ScalarField>::new_witness(cs.clone(), || {
Ok(C::ScalarField::from(k as u64))
})?;
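// pow = mu * t + k, so the CCCS terms continue the gamma power sequence right after the
// mu * t powers consumed by the LCCCS terms above.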
let gamma_t1 = gamma.pow_le(&pow.to_bits_le()?)?;
c += gamma_t1.clone() * e_2.clone() * lhs.clone();
}
Ok(c)
}
}
#[cfg(test)]
mod tests {
use super::ComputeCFromSigmasAndThetasGadget;
use crate::{
ccs::{
tests::{get_test_ccs, get_test_z},
CCS,
},
commitment::pedersen::Pedersen,
folding::hypernova::utils::{
compute_c_from_sigmas_and_thetas, compute_sigmas_and_thetas, sum_ci_mul_prod_thetaj,
sum_muls_gamma_pows_eq_sigma,
},
utils::virtual_polynomial::eq_eval,
};
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
#[test]
pub fn test_sum_muls_gamma_pow_eq_sigma_gadget() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let gamma: Fr = Fr::rand(&mut rng);
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Set up the Pedersen parameters and create an LCCCS instance
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let mut e_lcccs = Vec::new();
for r_x in &vec![lcccs_instance.r_x] {
e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
}
// Initialize cs and gamma
let cs = ConstraintSystem::<Fr>::new_ref();
let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();
for (i, sigmas) in sigmas_thetas.0.iter().enumerate() {
let expected =
sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
let sigmas_var =
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap();
let eq_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(e_lcccs[i])).unwrap();
let pow =
FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from((i * ccs.t) as u64))).unwrap();
let computed =
ComputeCFromSigmasAndThetasGadget::<Projective>::sum_muls_gamma_pows_eq_sigma(
gamma_var.clone(),
eq_var,
sigmas_var,
pow,
)
.unwrap();
assert_eq!(expected, computed.value().unwrap());
}
}
#[test]
pub fn test_sum_ci_mul_prod_thetaj_gadget() {
let mut rng = test_rng();
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Set up the Pedersen parameters and create an LCCCS instance
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let mut e_lcccs = Vec::new();
for r_x in &vec![lcccs_instance.r_x] {
e_lcccs.push(eq_eval(r_x, &r_x_prime).unwrap());
}
// Initialize cs
let cs = ConstraintSystem::<Fr>::new_ref();
let vec_thetas = sigmas_thetas.1;
for thetas in vec_thetas.iter() {
// sum c_i * prod theta_j
let expected = sum_ci_mul_prod_thetaj(&ccs, thetas); // from `compute_c_from_sigmas_and_thetas`
let mut prepared_thetas = Vec::new();
for i in 0..ccs.q {
let prepared: Vec<Fr> = ccs.S[i].iter().map(|j| thetas[*j]).collect();
prepared_thetas
.push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(prepared)).unwrap());
}
let computed = ComputeCFromSigmasAndThetasGadget::<Projective>::sum_ci_mul_prod_thetaj(
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(ccs.c.clone())).unwrap(),
prepared_thetas,
)
.unwrap();
assert_eq!(expected, computed.value().unwrap());
}
}
#[test]
pub fn test_compute_c_from_sigmas_and_thetas_gadget() {
let ccs: CCS<Projective> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let mut rng = test_rng();
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Set up the Pedersen parameters and create an LCCCS instance
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let expected_c = compute_c_from_sigmas_and_thetas(
&ccs,
&sigmas_thetas,
gamma,
&beta,
&vec![lcccs_instance.r_x.clone()],
&r_x_prime,
);
let cs = ConstraintSystem::<Fr>::new_ref();
let mut vec_sigmas = Vec::new();
let mut vec_thetas = Vec::new();
for sigmas in sigmas_thetas.0 {
vec_sigmas
.push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(sigmas.clone())).unwrap());
}
for thetas in sigmas_thetas.1 {
vec_thetas
.push(Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(thetas.clone())).unwrap());
}
let vec_r_x =
vec![
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(lcccs_instance.r_x.clone()))
.unwrap(),
];
let vec_r_x_prime =
Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(r_x_prime.clone())).unwrap();
let gamma_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(gamma)).unwrap();
let beta_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(beta.clone())).unwrap();
let computed_c = ComputeCFromSigmasAndThetasGadget::compute_c_from_sigmas_and_thetas(
cs,
&ccs,
vec_sigmas,
vec_thetas,
gamma_var,
beta_var,
vec_r_x,
vec_r_x_prime,
)
.unwrap();
assert_eq!(expected_c, computed_c.value().unwrap());
}
}

@@ -0,0 +1,188 @@
use ark_ec::CurveGroup;
use ark_poly::DenseMultilinearExtension;
use ark_std::One;
use std::sync::Arc;
use ark_std::{rand::Rng, UniformRand};
use super::cccs::Witness;
use super::utils::{compute_all_sum_Mz_evals, compute_sum_Mz};
use crate::ccs::CCS;
use crate::commitment::{
pedersen::{Params as PedersenParams, Pedersen},
CommitmentProver,
};
use crate::utils::mle::{matrix_to_mle, vec_to_mle};
use crate::utils::virtual_polynomial::VirtualPolynomial;
use crate::Error;
/// Linearized Committed CCS instance
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LCCCS<C: CurveGroup> {
// Commitment to witness
pub C: C,
// Relaxation factor of z for folded LCCCS
pub u: C::ScalarField,
// Public input/output
pub x: Vec<C::ScalarField>,
// Random evaluation point for the v_i
pub r_x: Vec<C::ScalarField>,
// Vector of v_i
pub v: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CCS<C> {
/// Compute v_j values of the linearized committed CCS form
/// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec<C::ScalarField> {
compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime)
}
pub fn to_lcccs<R: Rng>(
&self,
rng: &mut R,
pedersen_params: &PedersenParams<C>,
z: &[C::ScalarField],
) -> Result<(LCCCS<C>, Witness<C::ScalarField>), Error> {
let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
let r_w = C::ScalarField::rand(rng);
let C = Pedersen::commit(pedersen_params, &w, &r_w)?;
let r_x: Vec<C::ScalarField> = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
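// Note: r_x is sampled from the rng here to create a fresh LCCCS instance; when folding, the
// resulting LCCCS instead carries the sumcheck randomness r_x' (see NIMFS::fold).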
let v = self.compute_v_j(z, &r_x);
Ok((
LCCCS::<C> {
C,
u: C::ScalarField::one(),
x: z[1..(1 + self.l)].to_vec(),
r_x,
v,
},
Witness::<C::ScalarField> { w, r_w },
))
}
}
impl<C: CurveGroup> LCCCS<C> {
/// Compute all L_j(x) polynomials
pub fn compute_Ls(
&self,
ccs: &CCS<C>,
z: &Vec<C::ScalarField>,
) -> Vec<VirtualPolynomial<C::ScalarField>> {
let z_mle = vec_to_mle(ccs.s_prime, z);
// Convert all matrices to MLE
let M_x_y_mle: Vec<DenseMultilinearExtension<C::ScalarField>> =
ccs.M.clone().into_iter().map(matrix_to_mle).collect();
let mut vec_L_j_x = Vec::with_capacity(ccs.t);
for M_j in M_x_y_mle {
let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
let sum_Mz_virtual =
VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one());
let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap();
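// L_j(x) = eq(r_x, x) * ( \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y) ): build_f_hat multiplies
// the sum by the eq polynomial fixed at this instance's r_x.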
vec_L_j_x.push(L_j_x);
}
vec_L_j_x
}
/// Perform the check of the LCCCS instance described in section 4.2
pub fn check_relation(
&self,
pedersen_params: &PedersenParams<C>,
ccs: &CCS<C>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen
// opening, but checking that the commitment comes from committing to the witness.
if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
return Err(Error::NotSatisfied);
}
// check CCS relation
let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
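// Note: unlike a CCCS instance, an LCCCS instance is relaxed, so z starts with the relaxation
// factor u instead of a constant one.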
let computed_v = compute_all_sum_Mz_evals(&ccs.M, &z, &self.r_x, ccs.s_prime);
if computed_v != self.v {
return Err(Error::NotSatisfied);
}
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_std::Zero;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use crate::utils::hypercube::BooleanHypercube;
use ark_std::test_rng;
use ark_pallas::{Fr, Projective};
#[test]
/// Test linearized CCCS v_j against the L_j(x)
fn test_lcccs_v_j() {
let mut rng = test_rng();
let ccs = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z.clone()).unwrap();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
// with our test vector coming from R1CS, v should have length 3
assert_eq!(lcccs.v.len(), 3);
let vec_L_j_x = lcccs.compute_Ls(&ccs, &z);
assert_eq!(vec_L_j_x.len(), lcccs.v.len());
for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
let sum_L_j_x = BooleanHypercube::new(ccs.s)
.map(|y| L_j_x.evaluate(&y).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
assert_eq!(v_i, sum_L_j_x);
}
}
/// Given a bad z, check that the v_j should not match with the L_j(x)
#[test]
fn test_bad_v_j() {
let mut rng = test_rng();
let ccs = get_test_ccs();
let z = get_test_z(3);
ccs.check_relation(&z.clone()).unwrap();
// Mutate z so that the relation does not hold
let mut bad_z = z.clone();
bad_z[3] = Fr::zero();
assert!(ccs.check_relation(&bad_z.clone()).is_err());
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
// Compute v_j with the right z
let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap();
// with our test vector coming from R1CS, v should have length 3
assert_eq!(lcccs.v.len(), 3);
// Compute L_j(x) with the bad z
let vec_L_j_x = lcccs.compute_Ls(&ccs, &bad_z);
assert_eq!(vec_L_j_x.len(), lcccs.v.len());
// Make sure that the LCCCS is not satisfied given these L_j(x)
// i.e. summing L_j(x) over the hypercube should not give v_j for all j
let mut satisfied = true;
for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
let sum_L_j_x = BooleanHypercube::new(ccs.s)
.map(|y| L_j_x.evaluate(&y).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
if v_i != sum_L_j_x {
satisfied = false;
}
}
assert!(!satisfied);
}
}

@@ -0,0 +1,6 @@
/// Implements the scheme described in [HyperNova](https://eprint.iacr.org/2023/573.pdf)
pub mod cccs;
pub mod circuit;
pub mod lcccs;
pub mod nimfs;
pub mod utils;

@@ -0,0 +1,721 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::{Field, PrimeField};
use ark_poly::univariate::DensePolynomial;
use ark_poly::{DenseUVPolynomial, Polynomial};
use ark_std::{One, Zero};
use super::cccs::{Witness, CCCS};
use super::lcccs::LCCCS;
use super::utils::{compute_c_from_sigmas_and_thetas, compute_g, compute_sigmas_and_thetas};
use crate::ccs::CCS;
use crate::transcript::Transcript;
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::sum_check::structs::IOPProof as SumCheckProof;
use crate::utils::sum_check::{IOPSumCheck, SumCheck};
use crate::utils::virtual_polynomial::VPAuxInfo;
use crate::Error;
use std::fmt::Debug;
use std::marker::PhantomData;
/// Proof defines a multifolding proof
#[derive(Debug)]
pub struct Proof<C: CurveGroup> {
pub sc_proof: SumCheckProof<C::ScalarField>,
pub sigmas_thetas: SigmasThetas<C::ScalarField>,
}
#[derive(Debug)]
pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>);
#[derive(Debug)]
/// Implements the Non-Interactive Multi-Folding Scheme (NIMFS) described in section 5 of
/// [HyperNova](https://eprint.iacr.org/2023/573.pdf)
pub struct NIMFS<C: CurveGroup, T: Transcript<C>> {
pub _c: PhantomData<C>,
pub _t: PhantomData<T>,
}
impl<C: CurveGroup, T: Transcript<C>> NIMFS<C, T>
where
<C as Group>::ScalarField: Absorb,
{
pub fn fold(
lcccs: &[LCCCS<C>],
cccs: &[CCCS<C>],
sigmas_thetas: &SigmasThetas<C::ScalarField>,
r_x_prime: Vec<C::ScalarField>,
rho: C::ScalarField,
) -> LCCCS<C> {
let (sigmas, thetas) = (sigmas_thetas.0.clone(), sigmas_thetas.1.clone());
let mut C_folded = C::zero();
let mut u_folded = C::ScalarField::zero();
let mut x_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); lcccs[0].x.len()];
let mut v_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); sigmas[0].len()];
for i in 0..(lcccs.len() + cccs.len()) {
let rho_i = rho.pow([i as u64]);
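// The i-th instance (first the LCCCS ones, then the CCCS ones) is weighted by rho^i, so the
// folded instance is the rho-power random linear combination of all of them.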
let c: C;
let u: C::ScalarField;
let x: Vec<C::ScalarField>;
let v: Vec<C::ScalarField>;
if i < lcccs.len() {
c = lcccs[i].C;
u = lcccs[i].u;
x = lcccs[i].x.clone();
v = sigmas[i].clone();
} else {
c = cccs[i - lcccs.len()].C;
u = C::ScalarField::one();
x = cccs[i - lcccs.len()].x.clone();
v = thetas[i - lcccs.len()].clone();
}
C_folded += c.mul(rho_i);
u_folded += rho_i * u;
x_folded = x_folded
.iter()
.zip(
x.iter()
.map(|x_i| *x_i * rho_i)
.collect::<Vec<C::ScalarField>>(),
)
.map(|(a_i, b_i)| *a_i + b_i)
.collect();
v_folded = v_folded
.iter()
.zip(
v.iter()
.map(|x_i| *x_i * rho_i)
.collect::<Vec<C::ScalarField>>(),
)
.map(|(a_i, b_i)| *a_i + b_i)
.collect();
}
LCCCS::<C> {
C: C_folded,
u: u_folded,
x: x_folded,
r_x: r_x_prime,
v: v_folded,
}
}
pub fn fold_witness(
w_lcccs: &[Witness<C::ScalarField>],
w_cccs: &[Witness<C::ScalarField>],
rho: C::ScalarField,
) -> Witness<C::ScalarField> {
let mut w_folded: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w_lcccs[0].w.len()];
let mut r_w_folded = C::ScalarField::zero();
for i in 0..(w_lcccs.len() + w_cccs.len()) {
let rho_i = rho.pow([i as u64]);
let w: Vec<C::ScalarField>;
let r_w: C::ScalarField;
if i < w_lcccs.len() {
w = w_lcccs[i].w.clone();
r_w = w_lcccs[i].r_w;
} else {
w = w_cccs[i - w_lcccs.len()].w.clone();
r_w = w_cccs[i - w_lcccs.len()].r_w;
}
w_folded = w_folded
.iter()
.zip(
w.iter()
.map(|x_i| *x_i * rho_i)
.collect::<Vec<C::ScalarField>>(),
)
.map(|(a_i, b_i)| *a_i + b_i)
.collect();
r_w_folded += rho_i * r_w;
}
Witness {
w: w_folded,
r_w: r_w_folded,
}
}
/// Performs the multifolding prover. Given μ LCCCS instances and ν CCCS instances, folds them
/// into a single LCCCS instance. Since this is the prover, it also folds their witnesses.
/// Returns the final folded LCCCS, the folded witness, and the multifolding proof, which
/// contains the sumcheck proof and the helper sumcheck claim sigmas and thetas.
#[allow(clippy::type_complexity)]
pub fn prove(
transcript: &mut impl Transcript<C>,
ccs: &CCS<C>,
running_instances: &[LCCCS<C>],
new_instances: &[CCCS<C>],
w_lcccs: &[Witness<C::ScalarField>],
w_cccs: &[Witness<C::ScalarField>],
) -> Result<(Proof<C>, LCCCS<C>, Witness<C::ScalarField>), Error> {
// TODO: append the instances to the transcript
if running_instances.is_empty() {
return Err(Error::Empty);
}
if new_instances.is_empty() {
return Err(Error::Empty);
}
// construct the LCCCS z vector from the relaxation factor, public IO and witness
// XXX this deserves its own function in LCCCS
let mut z_lcccs = Vec::new();
for (i, running_instance) in running_instances.iter().enumerate() {
let z_1: Vec<C::ScalarField> = [
vec![running_instance.u],
running_instance.x.clone(),
w_lcccs[i].w.to_vec(),
]
.concat();
z_lcccs.push(z_1);
}
// construct the CCCS z vector from the public IO and witness
let mut z_cccs = Vec::new();
for (i, new_instance) in new_instances.iter().enumerate() {
let z_2: Vec<C::ScalarField> = [
vec![C::ScalarField::one()],
new_instance.x.clone(),
w_cccs[i].w.to_vec(),
]
.concat();
z_cccs.push(z_2);
}
// Step 1: Get some challenges
let gamma_scalar = C::ScalarField::from_le_bytes_mod_order(b"gamma");
let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta");
transcript.absorb(&gamma_scalar);
let gamma: C::ScalarField = transcript.get_challenge();
transcript.absorb(&beta_scalar);
let beta: Vec<C::ScalarField> = transcript.get_challenges(ccs.s);
// Compute g(x)
let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta);
// Step 3: Run the sumcheck prover
let sumcheck_proof = IOPSumCheck::<C, T>::prove(&g, transcript)
.map_err(|err| Error::SumCheckProveError(err.to_string()))?;
// Note: The following two "sanity checks" are done for this prototype, in a final version
// they should be removed.
//
// Sanity check 1: evaluate g(x) over x \in {0,1} (the boolean hypercube), and check that
// its sum is equal to the extracted_sum from the SumCheck.
//////////////////////////////////////////////////////////////////////
let mut g_over_bhc = C::ScalarField::zero();
for x in BooleanHypercube::new(ccs.s) {
g_over_bhc += g.evaluate(&x)?;
}
// note: this is the sum of g(x) over the whole boolean hypercube
let extracted_sum = IOPSumCheck::<C, T>::extract_sum(&sumcheck_proof);
if extracted_sum != g_over_bhc {
return Err(Error::NotEqual);
}
// Sanity check 2: expect \sum v_j * gamma^j to be equal to the sum of g(x) over the
// boolean hypercube (and also equal to the extracted_sum from the SumCheck).
let mut sum_v_j_gamma = C::ScalarField::zero();
for (i, running_instance) in running_instances.iter().enumerate() {
for j in 0..running_instance.v.len() {
let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
sum_v_j_gamma += running_instance.v[j] * gamma_j;
}
}
if g_over_bhc != sum_v_j_gamma {
return Err(Error::NotEqual);
}
if extracted_sum != sum_v_j_gamma {
return Err(Error::NotEqual);
}
//////////////////////////////////////////////////////////////////////
// Step 2: dig into the sumcheck and extract r_x_prime
let r_x_prime = sumcheck_proof.point.clone();
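// The sumcheck randomness becomes the evaluation point r_x' that the folded LCCCS instance
// will carry as its r_x.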
// Step 4: compute sigmas and thetas
let sigmas_thetas = compute_sigmas_and_thetas(ccs, &z_lcccs, &z_cccs, &r_x_prime);
// Step 6: Get the folding challenge
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
transcript.absorb(&rho_scalar);
let rho: C::ScalarField = transcript.get_challenge();
// Step 7: Create the folded instance
let folded_lcccs = Self::fold(
running_instances,
new_instances,
&sigmas_thetas,
r_x_prime,
rho,
);
// Step 8: Fold the witnesses
let folded_witness = Self::fold_witness(w_lcccs, w_cccs, rho);
Ok((
Proof::<C> {
sc_proof: sumcheck_proof,
sigmas_thetas,
},
folded_lcccs,
folded_witness,
))
}
/// Performs the multifolding verifier. Given μ LCCCS instances and ν CCCS instances, folds them
/// into a single LCCCS instance.
/// Returns the folded LCCCS instance.
pub fn verify(
transcript: &mut impl Transcript<C>,
ccs: &CCS<C>,
running_instances: &[LCCCS<C>],
new_instances: &[CCCS<C>],
proof: Proof<C>,
) -> Result<LCCCS<C>, Error> {
// TODO: append the instances to the transcript
if running_instances.is_empty() {
return Err(Error::Empty);
}
if new_instances.is_empty() {
return Err(Error::Empty);
}
// Step 1: Get some challenges
let gamma_scalar = C::ScalarField::from_le_bytes_mod_order(b"gamma");
transcript.absorb(&gamma_scalar);
let gamma: C::ScalarField = transcript.get_challenge();
let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta");
transcript.absorb(&beta_scalar);
let beta: Vec<C::ScalarField> = transcript.get_challenges(ccs.s);
let vp_aux_info = VPAuxInfo::<C::ScalarField> {
max_degree: ccs.d + 1,
num_variables: ccs.s,
phantom: PhantomData::<C::ScalarField>,
};
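// max_degree is d + 1: each Q_i term of g(x) multiplies up to d MLEs (d being the CCS max
// degree) by the extra multilinear eq(beta, x) factor, while the L_j terms have lower degree.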
// Step 3: Start verifying the sumcheck
// First, compute the expected sumcheck sum: \sum gamma^j v_j
let mut sum_v_j_gamma = C::ScalarField::zero();
for (i, running_instance) in running_instances.iter().enumerate() {
for j in 0..running_instance.v.len() {
let gamma_j = gamma.pow([(i * ccs.t + j) as u64]);
sum_v_j_gamma += running_instance.v[j] * gamma_j;
}
}
// Verify the interactive part of the sumcheck
let sumcheck_subclaim =
IOPSumCheck::<C, T>::verify(sum_v_j_gamma, &proof.sc_proof, &vp_aux_info, transcript)
.map_err(|err| Error::SumCheckVerifyError(err.to_string()))?;
// Step 2: Dig into the sumcheck claim and extract the randomness used
let r_x_prime = sumcheck_subclaim.point.clone();
// Step 5: Finish verifying sumcheck (verify the claim c)
let c = compute_c_from_sigmas_and_thetas(
ccs,
&proof.sigmas_thetas,
gamma,
&beta,
&running_instances
.iter()
.map(|lcccs| lcccs.r_x.clone())
.collect(),
&r_x_prime,
);
// check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas
if c != sumcheck_subclaim.expected_evaluation {
return Err(Error::NotEqual);
}
// Sanity check: we can also compute g(r_x') from the proof's last evaluation value, which
// should be equal to the previously obtained values.
let g_on_rxprime_from_sumcheck_last_eval =
DensePolynomial::from_coefficients_slice(&proof.sc_proof.proofs.last().unwrap().coeffs)
.evaluate(r_x_prime.last().unwrap());
if g_on_rxprime_from_sumcheck_last_eval != c {
return Err(Error::NotEqual);
}
if g_on_rxprime_from_sumcheck_last_eval != sumcheck_subclaim.expected_evaluation {
return Err(Error::NotEqual);
}
// Step 6: Get the folding challenge
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
transcript.absorb(&rho_scalar);
let rho: C::ScalarField = transcript.get_challenge();
// Step 7: Compute the folded instance
Ok(Self::fold(
running_instances,
new_instances,
&proof.sigmas_thetas,
r_x_prime,
rho,
))
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use crate::transcript::poseidon::poseidon_test_config;
use crate::transcript::poseidon::PoseidonTranscript;
use ark_std::test_rng;
use ark_std::UniformRand;
use crate::commitment::pedersen::Pedersen;
use ark_pallas::{Fr, Projective};
#[test]
fn test_fold() {
let ccs = get_test_ccs();
let z1 = get_test_z::<Fr>(3);
let z2 = get_test_z::<Fr>(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let mut rng = test_rng();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let (cccs, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z2).unwrap();
lcccs.check_relation(&pedersen_params, &ccs, &w1).unwrap();
cccs.check_relation(&pedersen_params, &ccs, &w2).unwrap();
let mut rng = test_rng();
let rho = Fr::rand(&mut rng);
let folded = NIMFS::<Projective, PoseidonTranscript<Projective>>::fold(
&[lcccs],
&[cccs],
&sigmas_thetas,
r_x_prime,
rho,
);
let w_folded =
NIMFS::<Projective, PoseidonTranscript<Projective>>::fold_witness(&[w1], &[w2], rho);
// check lcccs relation
folded
.check_relation(&pedersen_params, &ccs, &w_folded)
.unwrap();
}
/// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper)
#[test]
pub fn test_basic_multifolding() {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
// Generate a satisfying witness
let z_1 = get_test_z(3);
// Generate another satisfying witness
let z_2 = get_test_z(4);
// Create the LCCCS instance out of z_1
let (running_instance, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap();
// Create the CCCS instance out of z_2
let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap();
// Prover's transcript
let poseidon_config = poseidon_test_config::<Fr>();
let mut transcript_p: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
// Run the prover side of the multifolding
let (proof, folded_lcccs, folded_witness) =
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
&mut transcript_p,
&ccs,
&[running_instance.clone()],
&[new_instance.clone()],
&[w1],
&[w2],
)
.unwrap();
// Verifier's transcript
let mut transcript_v: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
// Run the verifier side of the multifolding
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
&mut transcript_v,
&ccs,
&[running_instance.clone()],
&[new_instance.clone()],
proof,
)
.unwrap();
assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs
.check_relation(&pedersen_params, &ccs, &folded_witness)
.unwrap();
}
/// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance
#[test]
pub fn test_multifolding_two_instances_multiple_steps() {
let mut rng = test_rng();
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
// LCCCS witness
let z_1 = get_test_z(2);
let (mut running_instance, mut w1) =
ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap();
let poseidon_config = poseidon_test_config::<Fr>();
let mut transcript_p: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
let mut transcript_v: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
let n: usize = 10;
for i in 3..n {
println!("\niteration: i {}", i); // DBG
// CCS witness
let z_2 = get_test_z(i);
println!("z_2 {:?}", z_2); // DBG
let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap();
// run the prover side of the multifolding
let (proof, folded_lcccs, folded_witness) =
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
&mut transcript_p,
&ccs,
&[running_instance.clone()],
&[new_instance.clone()],
&[w1],
&[w2],
)
.unwrap();
// run the verifier side of the multifolding
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
&mut transcript_v,
&ccs,
&[running_instance.clone()],
&[new_instance.clone()],
proof,
)
.unwrap();
assert_eq!(folded_lcccs, folded_lcccs_v);
// check that the folded instance with the folded witness holds the LCCCS relation
println!("check_relation {}", i);
folded_lcccs
.check_relation(&pedersen_params, &ccs, &folded_witness)
.unwrap();
running_instance = folded_lcccs;
w1 = folded_witness;
}
}
/// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step.
#[test]
pub fn test_multifolding_mu_nu_instances() {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let mu = 10;
let nu = 15;
// Generate mu LCCCS & nu CCCS satisfying witnesses
let mut z_lcccs = Vec::new();
for i in 0..mu {
let z = get_test_z(i + 3);
z_lcccs.push(z);
}
let mut z_cccs = Vec::new();
for i in 0..nu {
let z = get_test_z(nu + i + 3);
z_cccs.push(z);
}
// Create the LCCCS instances out of z_lcccs
let mut lcccs_instances = Vec::new();
let mut w_lcccs = Vec::new();
for z_i in z_lcccs.iter() {
let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap();
lcccs_instances.push(running_instance);
w_lcccs.push(w);
}
// Create the CCCS instances out of z_cccs
let mut cccs_instances = Vec::new();
let mut w_cccs = Vec::new();
for z_i in z_cccs.iter() {
let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap();
cccs_instances.push(new_instance);
w_cccs.push(w);
}
// Prover's transcript
let poseidon_config = poseidon_test_config::<Fr>();
let mut transcript_p: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
// Run the prover side of the multifolding
let (proof, folded_lcccs, folded_witness) =
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
&mut transcript_p,
&ccs,
&lcccs_instances,
&cccs_instances,
&w_lcccs,
&w_cccs,
)
.unwrap();
// Verifier's transcript
let mut transcript_v: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
// Run the verifier side of the multifolding
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
&mut transcript_v,
&ccs,
&lcccs_instances,
&cccs_instances,
proof,
)
.unwrap();
assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs
.check_relation(&pedersen_params, &ccs, &folded_witness)
.unwrap();
}
/// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step
/// and repeats the process doing multiple steps.
#[test]
pub fn test_multifolding_mu_nu_instances_multiple_steps() {
let mut rng = test_rng();
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let poseidon_config = poseidon_test_config::<Fr>();
// Prover's transcript
let mut transcript_p: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
// Verifier's transcript
let mut transcript_v: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init"));
let n_steps = 3;
// number of LCCCS & CCCS instances in each multifolding step
let mu = 10;
let nu = 15;
// Generate mu LCCCS & nu CCCS satisfying witnesses for each step
for step in 0..n_steps {
let mut z_lcccs = Vec::new();
for i in 0..mu {
let z = get_test_z(step + i + 3);
z_lcccs.push(z);
}
let mut z_cccs = Vec::new();
for i in 0..nu {
let z = get_test_z(nu + i + 3);
z_cccs.push(z);
}
// Create the LCCCS instances out of z_lcccs
let mut lcccs_instances = Vec::new();
let mut w_lcccs = Vec::new();
for z_i in z_lcccs.iter() {
let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap();
lcccs_instances.push(running_instance);
w_lcccs.push(w);
}
// Create the CCCS instances out of z_cccs
let mut cccs_instances = Vec::new();
let mut w_cccs = Vec::new();
for z_i in z_cccs.iter() {
let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap();
cccs_instances.push(new_instance);
w_cccs.push(w);
}
// Run the prover side of the multifolding
let (proof, folded_lcccs, folded_witness) =
NIMFS::<Projective, PoseidonTranscript<Projective>>::prove(
&mut transcript_p,
&ccs,
&lcccs_instances,
&cccs_instances,
&w_lcccs,
&w_cccs,
)
.unwrap();
// Run the verifier side of the multifolding
let folded_lcccs_v = NIMFS::<Projective, PoseidonTranscript<Projective>>::verify(
&mut transcript_v,
&ccs,
&lcccs_instances,
&cccs_instances,
proof,
)
.unwrap();
assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs
.check_relation(&pedersen_params, &ccs, &folded_witness)
.unwrap();
}
}
}

@@ -0,0 +1,382 @@
use ark_ec::CurveGroup;
use ark_ff::{Field, PrimeField};
use ark_poly::DenseMultilinearExtension;
use ark_poly::MultilinearExtension;
use ark_std::{One, Zero};
use std::ops::Add;
use crate::utils::multilinear_polynomial::fix_variables;
use crate::utils::multilinear_polynomial::scalar_mul;
use super::lcccs::LCCCS;
use super::nimfs::SigmasThetas;
use crate::ccs::CCS;
use crate::utils::hypercube::BooleanHypercube;
use crate::utils::mle::dense_vec_to_mle;
use crate::utils::mle::matrix_to_mle;
use crate::utils::vec::SparseMatrix;
use crate::utils::virtual_polynomial::{eq_eval, VirtualPolynomial};
/// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^{s'}} M_j(r, y) * z(y) for all j
/// in 0..t, one per matrix in `vec_M`
pub fn compute_all_sum_Mz_evals<F: PrimeField>(
vec_M: &[SparseMatrix<F>],
z: &Vec<F>,
r: &[F],
s_prime: usize,
) -> Vec<F> {
// Convert z to MLE
let z_y_mle = dense_vec_to_mle(s_prime, z);
// Convert all matrices to MLE
let M_x_y_mle: Vec<DenseMultilinearExtension<F>> =
vec_M.iter().cloned().map(matrix_to_mle).collect();
let mut v = Vec::with_capacity(M_x_y_mle.len());
for M_i in M_x_y_mle {
let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime);
let v_i = sum_Mz.evaluate(r).unwrap();
v.push(v_i);
}
v
}
/// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y)
pub fn compute_sum_Mz<F: PrimeField>(
M_j: DenseMultilinearExtension<F>,
z: &DenseMultilinearExtension<F>,
s_prime: usize,
) -> DenseMultilinearExtension<F> {
let mut sum_Mz = DenseMultilinearExtension {
evaluations: vec![F::zero(); M_j.evaluations.len()],
num_vars: M_j.num_vars - s_prime,
};
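// Sum over the boolean hypercube in y: each iteration fixes y in M_j, scales by z(y), and
// accumulates, yielding \sum_{y \in {0,1}^{s'}} M_j(x, y) * z(y).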
let bhc = BooleanHypercube::new(s_prime);
for y in bhc.into_iter() {
// In a slightly counter-intuitive fashion, fix_variables() fixes the right-most variables of the
// polynomial, so for a polynomial M(x,y) and a field element r, fix_variables(M, r) yields M(x, r).
let M_j_y = fix_variables(&M_j, &y);
let z_y = z.evaluate(&y).unwrap();
let M_j_z = scalar_mul(&M_j_y, &z_y);
sum_Mz = sum_Mz.add(M_j_z);
}
sum_Mz
}
/// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS
/// instances
pub fn compute_sigmas_and_thetas<C: CurveGroup>(
ccs: &CCS<C>,
z_lcccs: &[Vec<C::ScalarField>],
z_cccs: &[Vec<C::ScalarField>],
r_x_prime: &[C::ScalarField],
) -> SigmasThetas<C::ScalarField> {
let mut sigmas: Vec<Vec<C::ScalarField>> = Vec::new();
for z_lcccs_i in z_lcccs {
// sigmas
let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime);
sigmas.push(sigma_i);
}
let mut thetas: Vec<Vec<C::ScalarField>> = Vec::new();
for z_cccs_i in z_cccs {
// thetas
let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime);
thetas.push(theta_i);
}
SigmasThetas(sigmas, thetas)
}
/// Computes the sum $\sum_{j=0}^{n-1} \gamma^{\text{pow} + j} \cdot \text{eq\_eval} \cdot \sigma_{j}$, where $n$ is the length of `sigmas`
/// `pow` corresponds to `i * ccs.t` in `compute_c_from_sigmas_and_thetas`
pub fn sum_muls_gamma_pows_eq_sigma<F: PrimeField>(
gamma: F,
eq_eval: F,
sigmas: &[F],
pow: u64,
) -> F {
let mut result = F::zero();
for (j, sigma_j) in sigmas.iter().enumerate() {
let gamma_j = gamma.pow([(pow + (j as u64))]);
result += gamma_j * eq_eval * sigma_j;
}
result
}
/// Computes $\sum_{i=1}^{q} c_i * \prod_{j \in S_i} \theta_j$
pub fn sum_ci_mul_prod_thetaj<C: CurveGroup>(
ccs: &CCS<C>,
thetas: &[C::ScalarField],
) -> C::ScalarField {
let mut result = C::ScalarField::zero();
for i in 0..ccs.q {
let mut prod = C::ScalarField::one();
for j in ccs.S[i].clone() {
prod *= thetas[j];
}
result += ccs.c[i] * prod;
}
result
}
/// Compute the right-hand-side of step 5 of the multifolding scheme
pub fn compute_c_from_sigmas_and_thetas<C: CurveGroup>(
ccs: &CCS<C>,
st: &SigmasThetas<C::ScalarField>,
gamma: C::ScalarField,
beta: &[C::ScalarField],
vec_r_x: &Vec<Vec<C::ScalarField>>,
r_x_prime: &[C::ScalarField],
) -> C::ScalarField {
let (vec_sigmas, vec_thetas) = (st.0.clone(), st.1.clone());
let mut c = C::ScalarField::zero();
let mut e_lcccs = Vec::new();
for r_x in vec_r_x {
e_lcccs.push(eq_eval(r_x, r_x_prime).unwrap());
}
for (i, sigmas) in vec_sigmas.iter().enumerate() {
// (sum gamma^j * e_i * sigma_j)
c += sum_muls_gamma_pows_eq_sigma(gamma, e_lcccs[i], sigmas, (i * ccs.t) as u64);
}
let mu = vec_sigmas.len();
let e2 = eq_eval(beta, r_x_prime).unwrap();
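// e2 = eq(beta, r_x') is shared by all the CCCS terms in the loop below, whose gamma powers
// start at gamma^{mu * t}, right after the mu * t powers used by the LCCCS terms.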
for (k, thetas) in vec_thetas.iter().enumerate() {
// + gamma^{mu * t + k} * e2 * sum c_i * prod theta_j
let lhs = sum_ci_mul_prod_thetaj(ccs, thetas);
let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]);
c += gamma_t1 * e2 * lhs;
}
c
}
/// Compute g(x) polynomial for the given inputs.
pub fn compute_g<C: CurveGroup>(
ccs: &CCS<C>,
running_instances: &[LCCCS<C>],
z_lcccs: &[Vec<C::ScalarField>],
z_cccs: &[Vec<C::ScalarField>],
gamma: C::ScalarField,
beta: &[C::ScalarField],
) -> VirtualPolynomial<C::ScalarField> {
let mu = running_instances.len();
let mut vec_Ls: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
for (i, running_instance) in running_instances.iter().enumerate() {
let mut Ls = running_instance.compute_Ls(ccs, &z_lcccs[i]);
vec_Ls.append(&mut Ls);
}
let mut vec_Q: Vec<VirtualPolynomial<C::ScalarField>> = Vec::new();
for z_cccs_i in z_cccs.iter() {
let Q = ccs.compute_Q(z_cccs_i, beta);
vec_Q.push(Q);
}
let mut g = vec_Ls[0].clone();
// note: the following two loops could be integrated into the previous two loops, but are left
// separate for clarity in the PoC implementation.
for (j, L_j) in vec_Ls.iter_mut().enumerate().skip(1) {
let gamma_j = gamma.pow([j as u64]);
L_j.scalar_mul(&gamma_j);
g = g.add(L_j);
}
for (i, Q_i) in vec_Q.iter_mut().enumerate() {
let gamma_mu_t_i = gamma.pow([(mu * ccs.t + i) as u64]);
Q_i.scalar_mul(&gamma_mu_t_i);
g = g.add(Q_i);
}
g
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::{Fr, Projective};
use ark_std::test_rng;
use ark_std::One;
use ark_std::UniformRand;
use ark_std::Zero;
use crate::ccs::tests::{get_test_ccs, get_test_z};
use crate::commitment::pedersen::Pedersen;
use crate::utils::multilinear_polynomial::tests::fix_last_variables;
use crate::utils::virtual_polynomial::eq_eval;
#[test]
fn test_compute_sum_Mz_over_boolean_hypercube() {
let ccs = get_test_ccs::<Projective>();
let z = get_test_z(3);
ccs.check_relation(&z).unwrap();
let z_mle = dense_vec_to_mle(ccs.s_prime, &z);
// check that, for every x in the boolean hypercube, the result of the following
// computation is equal to 0
for x in BooleanHypercube::new(ccs.s) {
let mut r = Fr::zero();
for i in 0..ccs.q {
let mut Sj_prod = Fr::one();
for j in ccs.S[i].clone() {
let M_j = matrix_to_mle(ccs.M[j].clone());
let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
let sum_Mz_x = sum_Mz.evaluate(&x).unwrap();
Sj_prod *= sum_Mz_x;
}
r += Sj_prod * ccs.c[i];
}
assert_eq!(r, Fr::zero());
}
}
/// Given a matrix M(x,y) and a random field element `r`, test that ~M(r,y) is an s'-variable polynomial which
/// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements
/// of the column and the values eq_i(r) where i is the row of that element
///
/// For example, for matrix M:
///
/// [2, 3, 4, 4
/// 4, 4, 3, 2
/// 2, 8, 9, 2
/// 9, 4, 2, 0]
///
/// The polynomial ~M(r,y) is a 2-variable polynomial which evaluates to the following values on the hypercube:
/// - M(00) = 2*eq_00(r) + 4*eq_10(r) + 2*eq_01(r) + 9*eq_11(r)
/// - M(10) = 3*eq_00(r) + 4*eq_10(r) + 8*eq_01(r) + 4*eq_11(r)
/// - M(01) = 4*eq_00(r) + 3*eq_10(r) + 9*eq_01(r) + 2*eq_11(r)
/// - M(11) = 4*eq_00(r) + 2*eq_10(r) + 2*eq_01(r) + 0*eq_11(r)
///
/// This is used by Hypernova in LCCCS to perform a verifier-chosen random linear combination between the columns
/// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for
/// Universal and Updatable SNARKs".
#[test]
fn test_compute_M_r_y_compression() {
let mut rng = test_rng();
// s = 2, s' = 3
let ccs = get_test_ccs::<Projective>();
let M = ccs.M[0].clone().to_dense();
let M_mle = matrix_to_mle(ccs.M[0].clone());
// Fix the polynomial ~M(r,y)
let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let M_r_y = fix_last_variables(&M_mle, &r);
// compute M_r_y the other way around
for j in 0..M[0].len() {
// Go over every column of M
let column_j: Vec<Fr> = M.clone().iter().map(|x| x[j]).collect();
// and perform the random lincomb between the elements of the column and eq_i(r)
let rlc = BooleanHypercube::new(ccs.s)
.enumerate()
.map(|(i, x)| column_j[i] * eq_eval(&x, &r).unwrap())
.fold(Fr::zero(), |acc, result| acc + result);
assert_eq!(M_r_y.evaluations[j], rlc);
}
}
#[test]
fn test_compute_sigmas_and_thetas() {
let ccs = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let mut rng = test_rng();
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Set up the Pedersen parameters and create an LCCCS instance
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
let g = compute_g(
&ccs,
&[lcccs_instance.clone()],
&[z1.clone()],
&[z2.clone()],
gamma,
&beta,
);
// we expect g(r_x_prime) to be equal to:
// c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j
// from compute_c_from_sigmas_and_thetas
let expected_c = g.evaluate(&r_x_prime).unwrap();
let c = compute_c_from_sigmas_and_thetas::<Projective>(
&ccs,
&sigmas_thetas,
gamma,
&beta,
&vec![lcccs_instance.r_x],
&r_x_prime,
);
assert_eq!(c, expected_c);
}
#[test]
fn test_compute_g() {
let ccs = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let mut rng = test_rng(); // TMP
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Set up the Pedersen parameters and create an LCCCS instance
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let mut sum_v_j_gamma = Fr::zero();
for j in 0..lcccs_instance.v.len() {
let gamma_j = gamma.pow([j as u64]);
sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
}
// Compute g(x) with that r_x
let g = compute_g::<Projective>(
&ccs,
&[lcccs_instance.clone()],
&[z1.clone()],
&[z2.clone()],
gamma,
&beta,
);
// evaluate g(x) over x \in {0,1}^s
let mut g_on_bhc = Fr::zero();
for x in BooleanHypercube::new(ccs.s) {
g_on_bhc += g.evaluate(&x).unwrap();
}
// evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s
let mut sum_Lj_on_bhc = Fr::zero();
let vec_L = lcccs_instance.compute_Ls(&ccs, &z1);
for x in BooleanHypercube::new(ccs.s) {
for (j, Lj) in vec_L.iter().enumerate() {
let gamma_j = gamma.pow([j as u64]);
sum_Lj_on_bhc += Lj.evaluate(&x).unwrap() * gamma_j;
}
}
// Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q'
assert_ne!(g_on_bhc, Fr::zero());
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
// sum of gamma^j * Lj(x) over the boolean hypercube
assert_eq!(g_on_bhc, sum_Lj_on_bhc);
// evaluating g(x) over the boolean hypercube should give the same result as evaluating the
// sum of gamma^j * v_j over j \in [t]
assert_eq!(g_on_bhc, sum_v_j_gamma);
}
}

@@ -0,0 +1,4 @@
pub mod circuits;
pub mod hypernova;
pub mod nova;
pub mod protogalaxy;

@@ -0,0 +1,848 @@
/// contains [Nova](https://eprint.iacr.org/2021/370.pdf) related circuits
use ark_crypto_primitives::crh::{
poseidon::constraints::{CRHGadget, CRHParametersVar},
CRHSchemeGadget,
};
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::{Field, PrimeField};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToBitsGadget, ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::fmt::Debug;
use ark_std::{One, Zero};
use core::{borrow::Borrow, marker::PhantomData};
use super::{
cyclefold::{
CycleFoldChallengeGadget, CycleFoldCommittedInstanceVar, NIFSFullGadget, CF_IO_LEN,
},
CommittedInstance,
};
use crate::constants::N_BITS_RO;
use crate::folding::circuits::nonnative::{point_to_nonnative_limbs, NonNativeAffineVar};
use crate::frontend::FCircuit;
/// CF1 represents the ConstraintField used for the main Nova circuit which is over E1::Fr, where
/// E1 is the main curve where we do the folding.
pub type CF1<C> = <<C as CurveGroup>::Affine as AffineRepr>::ScalarField;
/// CF2 represents the ConstraintField used for the CycleFold circuit which is over E2::Fr=E1::Fq,
/// where E2 is the auxiliary curve (from [CycleFold](https://eprint.iacr.org/2023/1192.pdf)
/// approach) where we check the folding of the commitments (elliptic curve points).
pub type CF2<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
/// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova
/// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are
/// represented non-natively over the constraint field.
#[derive(Debug, Clone)]
pub struct CommittedInstanceVar<C: CurveGroup> {
pub u: FpVar<C::ScalarField>,
pub x: Vec<FpVar<C::ScalarField>>,
pub cmE: NonNativeAffineVar<C::ScalarField>,
pub cmW: NonNativeAffineVar<C::ScalarField>,
}
impl<C> AllocVar<CommittedInstance<C>, CF1<C>> for CommittedInstanceVar<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
{
fn new_variable<T: Borrow<CommittedInstance<C>>>(
cs: impl Into<Namespace<CF1<C>>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let u = FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?;
let x: Vec<FpVar<C::ScalarField>> =
Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?;
let cmE = NonNativeAffineVar::<C::ScalarField>::new_variable(
cs.clone(),
|| Ok(val.borrow().cmE),
mode,
)?;
let cmW = NonNativeAffineVar::<C::ScalarField>::new_variable(
cs.clone(),
|| Ok(val.borrow().cmW),
mode,
)?;
Ok(Self { u, x, cmE, cmW })
})
}
}
impl<C> CommittedInstanceVar<C>
where
C: CurveGroup,
<C as Group>::ScalarField: Absorb,
{
/// hash implements the committed instance hash compatible with the native implementation from
/// CommittedInstance.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where the counter can be `i` or `i+1` depending on the step
/// being hashed, and `U_i` is the `CommittedInstance`.
pub fn hash(
self,
crh_params: &CRHParametersVar<CF1<C>>,
i: FpVar<CF1<C>>,
z_0: Vec<FpVar<CF1<C>>>,
z_i: Vec<FpVar<CF1<C>>>,
) -> Result<FpVar<CF1<C>>, SynthesisError> {
let input = vec![
vec![i],
z_0,
z_i,
vec![self.u],
self.x,
self.cmE.x,
self.cmE.y,
self.cmW.x,
self.cmW.y,
]
.concat();
CRHGadget::<C::ScalarField>::evaluate(crh_params, &input)
}
}
/// Implements the circuit that does the checks of the Non-Interactive Folding Scheme Verifier
/// described in section 4 of [Nova](https://eprint.iacr.org/2021/370.pdf), where the cmE & cmW checks are
/// delegated to the NIFSCycleFoldGadget.
pub struct NIFSGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> NIFSGadget<C>
where
C: CurveGroup,
{
/// Implements the constraints for NIFS.V for u and x, since cm(E) and cm(W) are delegated to
/// the CycleFold circuit.
pub fn verify(
r: FpVar<CF1<C>>,
ci1: CommittedInstanceVar<C>,
ci2: CommittedInstanceVar<C>,
ci3: CommittedInstanceVar<C>,
) -> Result<Boolean<CF1<C>>, SynthesisError> {
// ensure that: ci3.u == ci1.u + r * ci2.u
let first_check = ci3.u.is_eq(&(ci1.u + r.clone() * ci2.u))?;
// ensure that: ci3.x == ci1.x + r * ci2.x
let x_rlc = ci1
.x
.iter()
.zip(ci2.x)
.map(|(a, b)| a + &r * &b)
.collect::<Vec<FpVar<CF1<C>>>>();
let second_check = x_rlc.is_eq(&ci3.x)?;
first_check.and(&second_check)
}
}
/// ChallengeGadget computes the RO challenge used for the Nova instances' NIFS; it contains both a
/// Rust-native and an in-circuit compatible version.
pub struct ChallengeGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> ChallengeGadget<C>
where
C: CurveGroup,
<C as CurveGroup>::BaseField: PrimeField,
<C as Group>::ScalarField: Absorb,
{
pub fn get_challenge_native(
poseidon_config: &PoseidonConfig<C::ScalarField>,
u_i: CommittedInstance<C>,
U_i: CommittedInstance<C>,
cmT: C,
) -> Result<Vec<bool>, SynthesisError> {
let (u_cmE_x, u_cmE_y) = point_to_nonnative_limbs::<C>(u_i.cmE)?;
let (u_cmW_x, u_cmW_y) = point_to_nonnative_limbs::<C>(u_i.cmW)?;
let (U_cmE_x, U_cmE_y) = point_to_nonnative_limbs::<C>(U_i.cmE)?;
let (U_cmW_x, U_cmW_y) = point_to_nonnative_limbs::<C>(U_i.cmW)?;
let (cmT_x, cmT_y) = point_to_nonnative_limbs::<C>(cmT)?;
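// The point coordinates live in the base field, so they are decomposed into nonnative limbs
// that a sponge over the scalar field can absorb; this keeps the native and in-circuit
// challenge computations consistent.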
let mut sponge = PoseidonSponge::<C::ScalarField>::new(poseidon_config);
let input = vec![
vec![u_i.u],
u_i.x.clone(),
u_cmE_x,
u_cmE_y,
u_cmW_x,
u_cmW_y,
vec![U_i.u],
U_i.x.clone(),
U_cmE_x,
U_cmE_y,
U_cmW_x,
U_cmW_y,
cmT_x,
cmT_y,
]
.concat();
sponge.absorb(&input);
let bits = sponge.squeeze_bits(N_BITS_RO);
Ok(bits)
}
// compatible with the native get_challenge_native
pub fn get_challenge_gadget(
cs: ConstraintSystemRef<C::ScalarField>,
poseidon_config: &PoseidonConfig<C::ScalarField>,
u_i: CommittedInstanceVar<C>,
U_i: CommittedInstanceVar<C>,
cmT: NonNativeAffineVar<C::ScalarField>,
) -> Result<Vec<Boolean<C::ScalarField>>, SynthesisError> {
let mut sponge = PoseidonSpongeVar::<C::ScalarField>::new(cs, poseidon_config);
let input: Vec<FpVar<C::ScalarField>> = vec![
vec![u_i.u.clone()],
u_i.x.clone(),
u_i.cmE.x,
u_i.cmE.y,
u_i.cmW.x,
u_i.cmW.y,
vec![U_i.u.clone()],
U_i.x.clone(),
U_i.cmE.x,
U_i.cmE.y,
U_i.cmW.x,
U_i.cmW.y,
cmT.x,
cmT.y,
]
.concat();
sponge.absorb(&input)?;
let bits = sponge.squeeze_bits(N_BITS_RO)?;
Ok(bits)
}
}
/// AugmentedFCircuit implements the F' circuit (augmented F) defined in
/// [Nova](https://eprint.iacr.org/2021/370.pdf) together with the extra constraints defined in
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
#[derive(Debug, Clone)]
pub struct AugmentedFCircuit<
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<CF1<C1>>,
> where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
pub _gc2: PhantomData<GC2>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
pub i: Option<CF1<C1>>,
pub z_0: Option<Vec<C1::ScalarField>>,
pub z_i: Option<Vec<C1::ScalarField>>,
pub u_i: Option<CommittedInstance<C1>>,
pub U_i: Option<CommittedInstance<C1>>,
pub U_i1: Option<CommittedInstance<C1>>,
pub cmT: Option<C1>,
pub F: FC, // F circuit
pub x: Option<CF1<C1>>, // public inputs (u_{i+1}.x)
// cyclefold verifier on C1
pub cf_u_i: Option<CommittedInstance<C2>>,
pub cf_U_i: Option<CommittedInstance<C2>>,
pub cf_U_i1: Option<CommittedInstance<C2>>,
pub cf_cmT: Option<C2>,
pub cf_r_nonnat: Option<C2::ScalarField>,
}
impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>>
AugmentedFCircuit<C1, C2, GC2, FC>
where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
pub fn empty(poseidon_config: &PoseidonConfig<CF1<C1>>, F_circuit: FC) -> Self {
Self {
_gc2: PhantomData,
poseidon_config: poseidon_config.clone(),
i: None,
z_0: None,
z_i: None,
u_i: None,
U_i: None,
U_i1: None,
cmT: None,
F: F_circuit,
x: None,
// cyclefold values
cf_u_i: None,
cf_U_i: None,
cf_U_i1: None,
cf_cmT: None,
cf_r_nonnat: None,
}
}
}
impl<C1, C2, GC2, FC> ConstraintSynthesizer<CF1<C1>> for AugmentedFCircuit<C1, C2, GC2, FC>
where
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<CF1<C1>>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let i = FpVar::<CF1<C1>>::new_witness(cs.clone(), || {
Ok(self.i.unwrap_or_else(CF1::<C1>::zero))
})?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let u_dummy_native = CommittedInstance::<C1>::dummy(1);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i1 = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
})?;
let cmT =
NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
let x =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.x.unwrap_or_else(CF1::<C1>::zero)))?;
let crh_params = CRHParametersVar::<C1::ScalarField>::new_constant(
cs.clone(),
self.poseidon_config.clone(),
)?;
// get z_{i+1} from the F circuit
let z_i1 = self.F.generate_step_constraints(cs.clone(), z_i.clone())?;
let zero = FpVar::<CF1<C1>>::new_constant(cs.clone(), CF1::<C1>::zero())?;
let is_not_basecase = i.is_neq(&zero)?;
// 1. u_i.x == H(i, z_0, z_i, U_i)
let u_i_x = U_i
.clone()
.hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?;
// check that h == u_i.x
(u_i.x[0]).conditional_enforce_equal(&u_i_x, &is_not_basecase)?;
// 2. u_i.cmE==cm(0), u_i.u==1
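// zero_x & zero_y below are the nonnative limbs of the x & y coordinates of the zero
// point's affine representation (x=0, y=1), matching the analogous check in the
// DeciderEthCircuit.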
let zero_x = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
cs.clone(),
C1::BaseField::zero(),
)?
.to_constraint_field()?;
let zero_y = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
cs.clone(),
C1::BaseField::one(),
)?
.to_constraint_field()?;
(u_i.cmE.x.is_eq(&zero_x)?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
(u_i.cmE.y.is_eq(&zero_y)?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
(u_i.u.is_one()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
// 3. nifs.verify, checks that folding u_i & U_i obtains U_{i+1}.
// compute r = H(u_i, U_i, cmT)
let r_bits = ChallengeGadget::<C1>::get_challenge_gadget(
cs.clone(),
&self.poseidon_config,
u_i.clone(),
U_i.clone(),
cmT.clone(),
)?;
let r = Boolean::le_bits_to_fp_var(&r_bits)?;
// Notice that NIFSGadget::verify is not checking the folding of cmE & cmW, since it will
// be done on the other curve.
let nifs_check = NIFSGadget::<C1>::verify(r, u_i.clone(), U_i.clone(), U_i1.clone())?;
nifs_check.conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
// 4. u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1}), this is the output of F'
let u_i1_x = U_i1.clone().hash(
&crh_params,
i + FpVar::<CF1<C1>>::one(),
z_0.clone(),
z_i1.clone(),
)?;
u_i1_x.enforce_equal(&x)?;
// CycleFold part
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(CF_IO_LEN);
let cf_u_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_u_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_U_i1 = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i1.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf_cmT.unwrap_or_else(C2::zero)))?;
// cf_r_nonnat is an auxiliary input
let cf_r_nonnat =
NonNativeFieldVar::<C2::ScalarField, CF2<C2>>::new_witness(cs.clone(), || {
Ok(self.cf_r_nonnat.unwrap_or_else(C2::ScalarField::zero))
})?;
// check that cf_u_i.x == [u_i, U_i, U_{i+1}]
let incircuit_x = vec![
u_i.cmE.x, u_i.cmE.y, u_i.cmW.x, u_i.cmW.y, U_i.cmE.x, U_i.cmE.y, U_i.cmW.x, U_i.cmW.y,
U_i1.cmE.x, U_i1.cmE.y, U_i1.cmW.x, U_i1.cmW.y,
]
.concat();
let mut cf_u_i_x: Vec<FpVar<CF2<C2>>> = vec![];
for x_i in cf_u_i.x.iter() {
let mut x_fpvar = x_i.to_constraint_field()?;
cf_u_i_x.append(&mut x_fpvar);
}
cf_u_i_x.conditional_enforce_equal(&incircuit_x, &is_not_basecase)?;
// cf_r_bits is denoted by rho* in the paper
let cf_r_bits = CycleFoldChallengeGadget::<C2, GC2>::get_challenge_gadget(
cs.clone(),
&self.poseidon_config,
cf_u_i.clone(),
cf_U_i.clone(),
cf_cmT.clone(),
)?;
// assert that cf_r_bits == cf_r_nonnat converted to bits. cf_r_nonnat is just an auxiliary
// value used to compute RLC of NonNativeFieldVar values, since we can convert
// NonNativeFieldVar into Vec<Boolean>, but not in the other direction.
let cf_r_nonnat_bits = cf_r_nonnat.to_bits_le()?;
cf_r_bits.conditional_enforce_equal(&cf_r_nonnat_bits[..N_BITS_RO], &is_not_basecase)?;
// check cf_u_i.cmE=0, cf_u_i.u=1
(cf_u_i.cmE.is_zero()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
(cf_u_i.u.is_one()?).conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
// check the folding of all the parameters of the CycleFold instances, where the elliptic
// curve point relations are checked natively in the Curve1 circuit (this one)
let v = NIFSFullGadget::<C2, GC2>::verify(
cf_r_bits,
cf_r_nonnat,
cf_cmT,
cf_U_i,
cf_u_i,
cf_U_i1,
)?;
v.conditional_enforce_equal(&Boolean::TRUE, &is_not_basecase)?;
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_ff::BigInteger;
use ark_pallas::{Fq, Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::{ConstraintLayer, ConstraintSystem, TracingMode};
use ark_std::One;
use ark_std::UniformRand;
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use tracing_subscriber::layer::SubscriberExt;
use crate::ccs::r1cs::{extract_r1cs, extract_w_x};
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
use crate::folding::nova::{
get_committed_instance_coordinates, nifs::NIFS, traits::NovaR1CS, Witness,
};
use crate::frontend::tests::CubicFCircuit;
use crate::transcript::poseidon::poseidon_test_config;
#[test]
fn test_committed_instance_var() {
let mut rng = ark_std::test_rng();
let ci = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
let cs = ConstraintSystem::<Fr>::new_ref();
let ciVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci.clone())).unwrap();
assert_eq!(ciVar.u.value().unwrap(), ci.u);
assert_eq!(ciVar.x.value().unwrap(), ci.x);
// the values cmE and cmW are checked in the CycleFold's circuit
// CommittedInstanceInCycleFoldVar in
// nova::cyclefold::tests::test_committed_instance_cyclefold_var
}
#[test]
fn test_nifs_gadget() {
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, _, r_Fr) = prepare_simple_fold_inputs();
let ci3_verifier = NIFS::<Projective, Pedersen<Projective>>::verify(r_Fr, &ci1, &ci2, &cmT);
assert_eq!(ci3_verifier, ci3);
let cs = ConstraintSystem::<Fr>::new_ref();
let rVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(r_Fr)).unwrap();
let ci1Var =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci1.clone()))
.unwrap();
let ci2Var =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci2.clone()))
.unwrap();
let ci3Var =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci3.clone()))
.unwrap();
let nifs_check = NIFSGadget::<Projective>::verify(
rVar.clone(),
ci1Var.clone(),
ci2Var.clone(),
ci3Var.clone(),
)
.unwrap();
nifs_check.enforce_equal(&Boolean::<Fr>::TRUE).unwrap();
assert!(cs.is_satisfied().unwrap());
}
#[test]
fn test_committed_instance_hash() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let i = Fr::from(3_u32);
let z_0 = vec![Fr::from(3_u32)];
let z_i = vec![Fr::from(3_u32)];
let ci = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
// compute the CommittedInstance hash natively
let h = ci
.hash(&poseidon_config, i, z_0.clone(), z_i.clone())
.unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
let iVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(i)).unwrap();
let z_0Var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap();
let z_iVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap();
let ciVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci.clone())).unwrap();
let crh_params = CRHParametersVar::<Fr>::new_constant(cs.clone(), poseidon_config).unwrap();
// compute the CommittedInstance hash in-circuit
let hVar = ciVar.hash(&crh_params, iVar, z_0Var, z_iVar).unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed hashes match
assert_eq!(hVar.value().unwrap(), h);
}
// checks that the gadget and native implementations of the challenge computation match
#[test]
fn test_challenge_gadget() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let u_i = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
let U_i = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
let cmT = Projective::rand(&mut rng);
// compute the challenge natively
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
&poseidon_config,
u_i.clone(),
U_i.clone(),
cmT,
)
.unwrap();
let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
let u_iVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(u_i.clone()))
.unwrap();
let U_iVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(U_i.clone()))
.unwrap();
let cmTVar = NonNativeAffineVar::<Fr>::new_witness(cs.clone(), || Ok(cmT)).unwrap();
// compute the challenge in-circuit
let r_bitsVar = ChallengeGadget::<Projective>::get_challenge_gadget(
cs.clone(),
&poseidon_config,
u_iVar,
U_iVar,
cmTVar,
)
.unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed hashes match
let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap();
assert_eq!(rVar.value().unwrap(), r);
assert_eq!(r_bitsVar.value().unwrap(), r_bits);
}
#[test]
/// test_augmented_f_circuit folds the CubicFCircuit circuit in multiple iterations, feeding the
/// values into the AugmentedFCircuit.
fn test_augmented_f_circuit() {
let mut layer = ConstraintLayer::default();
layer.mode = TracingMode::OnlyConstraints;
let subscriber = tracing_subscriber::Registry::default().with(layer);
let _guard = tracing::subscriber::set_default(subscriber);
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
// compute z vector for the initial instance
let cs = ConstraintSystem::<Fr>::new_ref();
// prepare the circuit to obtain its R1CS
let F_circuit = CubicFCircuit::<Fr>::new(());
let mut augmented_F_circuit =
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>>::empty(
&poseidon_config,
F_circuit,
);
augmented_F_circuit
.generate_constraints(cs.clone())
.unwrap();
cs.finalize();
println!("num_constraints={:?}", cs.num_constraints());
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fr>(&cs);
let (w, x) = extract_w_x::<Fr>(&cs);
assert_eq!(1 + x.len() + w.len(), r1cs.A.n_cols);
assert_eq!(r1cs.l, x.len());
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_rows);
// first step, set z_i=z_0=3 and z_{i+1}=35 (initial values)
let z_0 = vec![Fr::from(3_u32)];
let mut z_i = z_0.clone();
let mut z_i1 = vec![Fr::from(35_u32)];
let w_dummy = Witness::<Projective>::new(vec![Fr::zero(); w.len()], r1cs.A.n_rows);
let u_dummy = CommittedInstance::<Projective>::dummy(x.len());
// W_i is a 'dummy witness', all zeroes, but with the size corresponding to the R1CS that
// we're working with.
// set U_i <-- dummy instance
let mut W_i = w_dummy.clone();
let mut U_i = u_dummy.clone();
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
let mut w_i = w_dummy.clone();
let mut u_i = u_dummy.clone();
let (mut W_i1, mut U_i1, mut cmT): (
Witness<Projective>,
CommittedInstance<Projective>,
Projective,
) = (w_dummy.clone(), u_dummy.clone(), Projective::generator());
// as expected, dummy instances pass the relaxed_r1cs check
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
let mut i = Fr::zero();
let mut u_i1_x: Fr;
for _ in 0..4 {
if i == Fr::zero() {
// base case: i=0, z_i=z_0, U_i = U_d := dummy instance
// u_1.x = H(1, z_0, z_1, U_1), where z_1=z_{i+1} and U_1 is also the dummy instance
u_i1_x = U_i
.hash(&poseidon_config, Fr::one(), z_0.clone(), z_i1.clone())
.unwrap();
// base case
augmented_F_circuit =
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>> {
_gc2: PhantomData,
poseidon_config: poseidon_config.clone(),
i: Some(i), // = 0
z_0: Some(z_0.clone()), // = z_i=3
z_i: Some(z_i.clone()),
u_i: Some(u_i.clone()), // = dummy
U_i: Some(U_i.clone()), // = dummy
U_i1: Some(U_i1.clone()), // = dummy
cmT: Some(cmT),
F: F_circuit,
x: Some(u_i1_x),
// cyclefold instances (not tested in this test)
cf_u_i: None,
cf_U_i: None,
cf_U_i1: None,
cf_cmT: None,
cf_r_nonnat: None,
};
} else {
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
// U_{i+1}
let T: Vec<Fr>;
(T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
&pedersen_params,
&r1cs,
&w_i,
&u_i,
&W_i,
&U_i,
)
.unwrap();
// get challenge r
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
&poseidon_config,
u_i.clone(),
U_i.clone(),
cmT,
)
.unwrap();
let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
(W_i1, U_i1) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT,
)
.unwrap();
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
// folded instance output (public input, x)
// u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1})
u_i1_x = U_i1
.hash(&poseidon_config, i + Fr::one(), z_0.clone(), z_i1.clone())
.unwrap();
// set up dummy cyclefold instances just for the sake of this test. Warning: this
// is only because we are in a test where we're not testing the cyclefold side of
// things.
let cf_W_i = Witness::<Projective2>::new(vec![Fq::zero(); 1], 1);
let cf_U_i = CommittedInstance::<Projective2>::dummy(CF_IO_LEN);
let cf_u_i_x = [
get_committed_instance_coordinates(&u_i),
get_committed_instance_coordinates(&U_i),
get_committed_instance_coordinates(&U_i1),
]
.concat();
let cf_u_i = CommittedInstance::<Projective2> {
cmE: cf_U_i.cmE,
u: Fq::one(),
cmW: cf_U_i.cmW,
x: cf_u_i_x,
};
let cf_w_i = cf_W_i.clone();
let (cf_T, cf_cmT): (Vec<Fq>, Projective2) =
(vec![Fq::zero(); cf_W_i.E.len()], Projective2::zero());
let cf_r_bits =
CycleFoldChallengeGadget::<Projective2, GVar2>::get_challenge_native(
&poseidon_config,
cf_u_i.clone(),
cf_U_i.clone(),
cf_cmT,
)
.unwrap();
let cf_r_Fq = Fq::from_bigint(BigInteger::from_bits_le(&cf_r_bits)).unwrap();
let (_, cf_U_i1) = NIFS::<Projective2, Pedersen<Projective2>>::fold_instances(
cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT,
)
.unwrap();
augmented_F_circuit =
AugmentedFCircuit::<Projective, Projective2, GVar2, CubicFCircuit<Fr>> {
_gc2: PhantomData,
poseidon_config: poseidon_config.clone(),
i: Some(i),
z_0: Some(z_0.clone()),
z_i: Some(z_i.clone()),
u_i: Some(u_i),
U_i: Some(U_i.clone()),
U_i1: Some(U_i1.clone()),
cmT: Some(cmT),
F: F_circuit,
x: Some(u_i1_x),
cf_u_i: Some(cf_u_i),
cf_U_i: Some(cf_U_i),
cf_U_i1: Some(cf_U_i1),
cf_cmT: Some(cf_cmT),
cf_r_nonnat: Some(cf_r_Fq),
};
}
let cs = ConstraintSystem::<Fr>::new_ref();
augmented_F_circuit
.generate_constraints(cs.clone())
.unwrap();
let is_satisfied = cs.is_satisfied().unwrap();
if !is_satisfied {
dbg!(cs.which_is_unsatisfied().unwrap());
}
assert!(is_satisfied);
cs.finalize();
let cs = cs.into_inner().unwrap();
let (w_i1, x_i1) = extract_w_x::<Fr>(&cs);
assert_eq!(x_i1.len(), 1);
assert_eq!(x_i1[0], u_i1_x);
// compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we
// assign them directly to w_i, u_i.
w_i = Witness::<Projective>::new(w_i1.clone(), r1cs.A.n_rows);
u_i = w_i
.commit::<Pedersen<Projective>>(&pedersen_params, vec![u_i1_x])
.unwrap();
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
// set values for next iteration
i += Fr::one();
// advance the F circuit state
z_i = z_i1.clone();
z_i1 = F_circuit.step_native(z_i.clone()).unwrap();
U_i = U_i1.clone();
W_i = W_i1.clone();
}
}
}

@@ -0,0 +1,567 @@
//! Contains [CycleFold](https://eprint.iacr.org/2023/1192.pdf) related circuits.
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
bits::uint8::UInt8,
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, nonnative::NonNativeFieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToBytesGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_serialize::CanonicalSerialize;
use ark_std::fmt::Debug;
use ark_std::Zero;
use core::{borrow::Borrow, marker::PhantomData};
use super::circuits::CF2;
use super::CommittedInstance;
use crate::constants::N_BITS_RO;
use crate::Error;
// public inputs length for the CycleFoldCircuit: |[u_i, U_i, U_{i+1}]|, i.e. 3 committed
// instances x 4 point coordinates each ({cmE,cmW}.{x,y}) = 12
pub const CF_IO_LEN: usize = 12;
/// CycleFoldCommittedInstanceVar is the CycleFold CommittedInstance representation in the Nova
/// circuit.
#[derive(Debug, Clone)]
pub struct CycleFoldCommittedInstanceVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
where
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
_c: PhantomData<C>,
pub cmE: GC,
pub u: NonNativeFieldVar<C::ScalarField, CF2<C>>,
pub cmW: GC,
pub x: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
}
impl<C, GC> AllocVar<CommittedInstance<C>, CF2<C>> for CycleFoldCommittedInstanceVar<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
fn new_variable<T: Borrow<CommittedInstance<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?;
let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?;
let u = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
cs.clone(),
|| Ok(val.borrow().u),
mode,
)?;
let x = Vec::<NonNativeFieldVar<C::ScalarField, CF2<C>>>::new_variable(
cs.clone(),
|| Ok(val.borrow().x.clone()),
mode,
)?;
Ok(Self {
_c: PhantomData,
cmE,
u,
cmW,
x,
})
})
}
}
/// CommittedInstanceInCycleFoldVar represents the Nova CommittedInstance in the CycleFold
/// circuit, where the commitments to E and W (cmE and cmW) from the CommittedInstance are
/// represented as native points (the CycleFold circuit lives on E2), which are folded on the
/// auxiliary curve's constraint field (E2::Fr = E1::Fq).
#[derive(Debug, Clone)]
pub struct CommittedInstanceInCycleFoldVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
where
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
_c: PhantomData<C>,
pub cmE: GC,
pub cmW: GC,
}
impl<C, GC> AllocVar<CommittedInstance<C>, CF2<C>> for CommittedInstanceInCycleFoldVar<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
fn new_variable<T: Borrow<CommittedInstance<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?;
let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?;
Ok(Self {
_c: PhantomData,
cmE,
cmW,
})
})
}
}
/// NIFSinCycleFoldGadget performs the Nova NIFS.V elliptic curve point relation checks on the
/// other curve (natively) following [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
pub struct NIFSinCycleFoldGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
_c: PhantomData<C>,
_gc: PhantomData<GC>,
}
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> NIFSinCycleFoldGadget<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
pub fn verify(
r_bits: Vec<Boolean<CF2<C>>>,
cmT: GC,
ci1: CommittedInstanceInCycleFoldVar<C, GC>,
ci2: CommittedInstanceInCycleFoldVar<C, GC>,
ci3: CommittedInstanceInCycleFoldVar<C, GC>,
) -> Result<Boolean<CF2<C>>, SynthesisError> {
// cm(E) check: ci3.cmE == ci1.cmE + r * cmT + r^2 * ci2.cmE
let first_check = ci3.cmE.is_eq(
&((ci2.cmE.scalar_mul_le(r_bits.iter())? + cmT).scalar_mul_le(r_bits.iter())?
+ ci1.cmE),
)?;
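// Note: the expression above computes ci1.cmE + r*cmT + r^2*ci2.cmE in Horner form,
// ((ci2.cmE * r) + cmT) * r + ci1.cmE, so it needs only two scalar multiplications by the
// r bits instead of three.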
// cm(W) check: ci3.cmW == ci1.cmW + r * ci2.cmW
let second_check = ci3
.cmW
.is_eq(&(ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?))?;
first_check.and(&second_check)
}
}
/// This is the gadget used in the AugmentedFCircuit to verify the CycleFold instances folding,
/// which checks the correct RLC of u,x,cmE,cmW (hence the name containing 'Full', since it checks
/// all the RLC values, not only the native ones). It assumes that ci2.cmE=0, ci2.u=1.
pub struct NIFSFullGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
_c: PhantomData<C>,
_gc: PhantomData<GC>,
}
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> NIFSFullGadget<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
pub fn verify(
r_bits: Vec<Boolean<CF2<C>>>,
r_nonnat: NonNativeFieldVar<C::ScalarField, CF2<C>>,
cmT: GC,
// ci2 is assumed to always have cmE=0 and u=1 (these checks are done before calling this method)
ci1: CycleFoldCommittedInstanceVar<C, GC>,
ci2: CycleFoldCommittedInstanceVar<C, GC>,
ci3: CycleFoldCommittedInstanceVar<C, GC>,
) -> Result<Boolean<CF2<C>>, SynthesisError> {
// cm(E) check: ci3.cmE == ci1.cmE + r * cmT (ci2.cmE=0)
let first_check = ci3
.cmE
.is_eq(&(cmT.scalar_mul_le(r_bits.iter())? + ci1.cmE))?;
// cm(W) check: ci3.cmW == ci1.cmW + r * ci2.cmW
let second_check = ci3
.cmW
.is_eq(&(ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?))?;
let u_rlc: NonNativeFieldVar<C::ScalarField, CF2<C>> = ci1.u + r_nonnat.clone();
let third_check = u_rlc.is_eq(&ci3.u)?;
// ensure that: ci3.x == ci1.x + r * ci2.x
let x_rlc: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> = ci1
.x
.iter()
.zip(ci2.x)
.map(|(a, b)| a + &r_nonnat * &b)
.collect::<Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>>();
let fourth_check = x_rlc.is_eq(&ci3.x)?;
first_check
.and(&second_check)?
.and(&third_check)?
.and(&fourth_check)
}
}
/// CycleFoldChallengeGadget computes the RO challenge used for the CycleFold instances' NIFS;
/// it contains rust-native and in-circuit compatible versions.
pub struct CycleFoldChallengeGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
_c: PhantomData<C>, // Nova's Curve2, the one used for the CycleFold circuit
_gc: PhantomData<GC>,
}
impl<C, GC> CycleFoldChallengeGadget<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
<C as CurveGroup>::BaseField: PrimeField,
<C as CurveGroup>::BaseField: Absorb,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
pub fn get_challenge_native(
poseidon_config: &PoseidonConfig<C::BaseField>,
u_i: CommittedInstance<C>,
U_i: CommittedInstance<C>,
cmT: C,
) -> Result<Vec<bool>, Error> {
let mut sponge = PoseidonSponge::<C::BaseField>::new(poseidon_config);
let u_i_cmE_bytes = point_to_bytes(u_i.cmE);
let u_i_cmW_bytes = point_to_bytes(u_i.cmW);
let U_i_cmE_bytes = point_to_bytes(U_i.cmE);
let U_i_cmW_bytes = point_to_bytes(U_i.cmW);
let cmT_bytes = point_to_bytes(cmT);
let mut u_i_u_bytes = Vec::new();
u_i.u.serialize_uncompressed(&mut u_i_u_bytes)?;
let mut u_i_x_bytes = Vec::new();
u_i.x.serialize_uncompressed(&mut u_i_x_bytes)?;
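// `serialize_uncompressed` on a Vec prepends its length as a u64 (8 bytes); strip that
// prefix (here and for U_i.x below) so the bytes match the in-circuit concatenation of the
// x elements, which carries no length prefix.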
u_i_x_bytes = u_i_x_bytes[8..].to_vec();
let mut U_i_u_bytes = Vec::new();
U_i.u.serialize_uncompressed(&mut U_i_u_bytes)?;
let mut U_i_x_bytes = Vec::new();
U_i.x.serialize_uncompressed(&mut U_i_x_bytes)?;
U_i_x_bytes = U_i_x_bytes[8..].to_vec();
let input: Vec<u8> = [
u_i_cmE_bytes,
u_i_u_bytes,
u_i_cmW_bytes,
u_i_x_bytes,
U_i_cmE_bytes,
U_i_u_bytes,
U_i_cmW_bytes,
U_i_x_bytes,
cmT_bytes,
]
.concat();
sponge.absorb(&input);
let bits = sponge.squeeze_bits(N_BITS_RO);
Ok(bits)
}
// compatible with the native get_challenge_native
pub fn get_challenge_gadget(
cs: ConstraintSystemRef<C::BaseField>,
poseidon_config: &PoseidonConfig<C::BaseField>,
u_i: CycleFoldCommittedInstanceVar<C, GC>,
U_i: CycleFoldCommittedInstanceVar<C, GC>,
cmT: GC,
) -> Result<Vec<Boolean<C::BaseField>>, SynthesisError> {
let mut sponge = PoseidonSpongeVar::<C::BaseField>::new(cs, poseidon_config);
let u_i_x_bytes: Vec<UInt8<CF2<C>>> = u_i
.x
.iter()
.flat_map(|e| e.to_bytes().unwrap_or(vec![]))
.collect::<Vec<UInt8<CF2<C>>>>();
let U_i_x_bytes: Vec<UInt8<CF2<C>>> = U_i
.x
.iter()
.flat_map(|e| e.to_bytes().unwrap_or(vec![]))
.collect::<Vec<UInt8<CF2<C>>>>();
let input: Vec<UInt8<CF2<C>>> = [
u_i.cmE.to_bytes()?,
u_i.u.to_bytes()?,
u_i.cmW.to_bytes()?,
u_i_x_bytes,
U_i.cmE.to_bytes()?,
U_i.u.to_bytes()?,
U_i.cmW.to_bytes()?,
U_i_x_bytes,
cmT.to_bytes()?,
// TODO: instead of bytes, use field elements, but this needs the x,y coordinates from
// u_i.{cmE,cmW}, U_i.{cmE,cmW}, cmT. Depends on exposing the x,y coordinates of GC. Issue
// to keep track of this:
// https://github.com/privacy-scaling-explorations/folding-schemes/issues/44
]
.concat();
sponge.absorb(&input)?;
let bits = sponge.squeeze_bits(N_BITS_RO)?;
Ok(bits)
}
}
/// Returns the point's bytes in a form compatible with the in-circuit ark_r1cs_std `.to_bytes`
/// representation.
fn point_to_bytes<C: CurveGroup>(p: C) -> Vec<u8> {
let l = p.uncompressed_size();
let mut b = Vec::new();
p.serialize_uncompressed(&mut b).unwrap();
// clear the serialization flags stored in the last byte
b[l - 1] = 0;
if p.is_zero() {
// the in-circuit `.to_bytes` encodes the zero point as the affine pair (x=0, y=1) with the
// infinity flag set, so mimic that encoding here
b[l / 2] = 1;
b[l - 1] = 1;
}
b
}
/// CycleFoldCircuit contains the constraints that check the correct fold of the committed
/// instances from Curve1. Namely, it checks the random linear combinations of the elliptic curve
/// (Curve1) points of u_i, U_i leading to U_{i+1}
#[derive(Debug, Clone)]
pub struct CycleFoldCircuit<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
pub _gc: PhantomData<GC>,
pub r_bits: Option<Vec<bool>>,
pub cmT: Option<C>,
// u_i,U_i,U_i1 are the nova instances from AugmentedFCircuit which will be (their elliptic
// curve points) checked natively in CycleFoldCircuit
pub u_i: Option<CommittedInstance<C>>,
pub U_i: Option<CommittedInstance<C>>,
pub U_i1: Option<CommittedInstance<C>>,
pub x: Option<Vec<CF2<C>>>, // public inputs (cf_u_{i+1}.x)
}
impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> CycleFoldCircuit<C, GC> {
pub fn empty() -> Self {
Self {
_gc: PhantomData,
r_bits: None,
cmT: None,
u_i: None,
U_i: None,
U_i1: None,
x: None,
}
}
}
impl<C, GC> ConstraintSynthesizer<CF2<C>> for CycleFoldCircuit<C, GC>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF2<C>>) -> Result<(), SynthesisError> {
let r_bits: Vec<Boolean<CF2<C>>> = Vec::new_witness(cs.clone(), || {
Ok(self.r_bits.unwrap_or(vec![false; N_BITS_RO]))
})?;
let cmT = GC::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or(C::zero())))?;
let u_dummy_native = CommittedInstance::<C>::dummy(1);
let u_i = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i1 = CommittedInstanceInCycleFoldVar::<C, GC>::new_witness(cs.clone(), || {
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
})?;
let _x = Vec::<FpVar<CF2<C>>>::new_input(cs.clone(), || {
Ok(self.x.unwrap_or(vec![CF2::<C>::zero(); CF_IO_LEN]))
})?;
#[cfg(test)]
assert_eq!(_x.len(), CF_IO_LEN); // non-constrained sanity check
// fold the original Nova instances natively in CycleFold
let v =
NIFSinCycleFoldGadget::<C, GC>::verify(r_bits.clone(), cmT, u_i.clone(), U_i, U_i1)?;
v.enforce_equal(&Boolean::TRUE)?;
// check that x == [u_i, U_i, U_{i+1}]: that the cmE & cmW values from u_i, U_i, U_{i+1} in
// the CycleFoldCircuit are the same ones used in the public inputs 'x', which come from the
// AugmentedFCircuit.
// TODO: Issue to keep track of this: https://github.com/privacy-scaling-explorations/folding-schemes/issues/44
// and https://github.com/privacy-scaling-explorations/folding-schemes/issues/48
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_ff::BigInteger;
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;
use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
use crate::transcript::poseidon::poseidon_test_config;
#[test]
fn test_committed_instance_cyclefold_var() {
let mut rng = ark_std::test_rng();
let ci = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
// check the instantiation of the CycleFold side:
let cs = ConstraintSystem::<Fq>::new_ref();
let ciVar =
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci.clone())
})
.unwrap();
assert_eq!(ciVar.cmE.value().unwrap(), ci.cmE);
assert_eq!(ciVar.cmW.value().unwrap(), ci.cmW);
}
#[test]
fn test_nifs_gadget_cyclefold() {
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, _) = prepare_simple_fold_inputs();
// cs is the ConstraintSystem over the curve cycle's auxiliary curve constraint field
// (E2::Fr)
let cs = ConstraintSystem::<Fq>::new_ref();
let r_bitsVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
let ci1Var =
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci1.clone())
})
.unwrap();
let ci2Var =
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci2.clone())
})
.unwrap();
let ci3Var =
CommittedInstanceInCycleFoldVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci3.clone())
})
.unwrap();
let nifs_cf_check = NIFSinCycleFoldGadget::<Projective, GVar>::verify(
r_bitsVar, cmTVar, ci1Var, ci2Var, ci3Var,
)
.unwrap();
nifs_cf_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
#[test]
fn test_nifs_full_gadget() {
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, r_Fr) = prepare_simple_fold_inputs();
let cs = ConstraintSystem::<Fq>::new_ref();
let r_nonnatVar =
NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(r_Fr)).unwrap();
let r_bitsVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
let ci1Var =
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci1.clone())
})
.unwrap();
let ci2Var =
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci2.clone())
})
.unwrap();
let ci3Var =
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(ci3.clone())
})
.unwrap();
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
let nifs_check = NIFSFullGadget::<Projective, GVar>::verify(
r_bitsVar,
r_nonnatVar,
cmTVar,
ci1Var,
ci2Var,
ci3Var,
)
.unwrap();
nifs_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
#[test]
fn test_cyclefold_challenge_gadget() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fq>();
let u_i = CommittedInstance::<Projective> {
cmE: Projective::zero(), // zero on purpose, so we test also the zero point case
u: Fr::zero(),
cmW: Projective::rand(&mut rng),
x: std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(CF_IO_LEN)
.collect(),
};
let U_i = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(CF_IO_LEN)
.collect(),
};
let cmT = Projective::rand(&mut rng);
// compute the challenge natively
let r_bits = CycleFoldChallengeGadget::<Projective, GVar>::get_challenge_native(
&poseidon_config,
u_i.clone(),
U_i.clone(),
cmT,
)
.unwrap();
let cs = ConstraintSystem::<Fq>::new_ref();
let u_iVar =
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(u_i.clone())
})
.unwrap();
let U_iVar =
CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
Ok(U_i.clone())
})
.unwrap();
let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap();
let r_bitsVar = CycleFoldChallengeGadget::<Projective, GVar>::get_challenge_gadget(
cs.clone(),
&poseidon_config,
u_iVar,
U_iVar,
cmTVar,
)
.unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed hashes match
let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap();
let r = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
assert_eq!(rVar.value().unwrap(), r);
assert_eq!(r_bitsVar.value().unwrap(), r_bits);
}
}

@@ -0,0 +1,205 @@
//! This file implements the onchain (Ethereum's EVM) decider.
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar};
use ark_snark::SNARK;
use ark_std::rand::CryptoRng;
use ark_std::rand::RngCore;
use core::marker::PhantomData;
pub use super::decider_eth_circuit::DeciderEthCircuit;
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentProver};
use crate::folding::circuits::nonnative::point_to_nonnative_limbs_custom_opt;
use crate::folding::nova::{circuits::CF2, CommittedInstance, Nova};
use crate::frontend::FCircuit;
use crate::Error;
use crate::{Decider as DeciderTrait, FoldingScheme};
use ark_r1cs_std::fields::nonnative::params::OptimizationType;
/// Onchain Decider, for ethereum use cases
#[derive(Clone, Debug)]
pub struct Decider<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS> {
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
_fc: PhantomData<FC>,
_cp1: PhantomData<CP1>,
_cp2: PhantomData<CP2>,
_s: PhantomData<S>,
_fs: PhantomData<FS>,
}
impl<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS> DeciderTrait<C1, C2, FC, FS>
for Decider<C1, GC1, C2, GC2, FC, CP1, CP2, S, FS>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CP1: CommitmentProver<C1>,
// enforce that CP2 is a Pedersen commitment, since we're in Ethereum's EVM decider
CP2: CommitmentProver<C2, Params = PedersenParams<C2>>,
S: SNARK<C1::ScalarField>,
FS: FoldingScheme<C1, C2, FC>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
// constrain FS to Nova, since this is a Decider specifically for Nova
Nova<C1, GC1, C2, GC2, FC, CP1, CP2>: From<FS>,
{
type ProverParam = S::ProvingKey;
type Proof = S::Proof;
type VerifierParam = S::VerifyingKey;
type PublicInput = Vec<C1::ScalarField>;
type CommittedInstanceWithWitness = ();
type CommittedInstance = CommittedInstance<C1>;
fn prove(
pp: &Self::ProverParam,
mut rng: impl RngCore + CryptoRng,
folding_scheme: FS,
) -> Result<Self::Proof, Error> {
let circuit =
DeciderEthCircuit::<C1, GC1, C2, GC2, CP1, CP2>::from_nova::<FC>(folding_scheme.into());
let proof = S::prove(pp, circuit, &mut rng).map_err(|e| Error::Other(e.to_string()))?;
Ok(proof)
}
fn verify(
vp: &Self::VerifierParam,
i: C1::ScalarField,
z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>,
running_instance: &Self::CommittedInstance,
proof: Self::Proof,
) -> Result<bool, Error> {
let (cmE_x, cmE_y) = point_to_nonnative_limbs_custom_opt::<C1>(
running_instance.cmE,
OptimizationType::Constraints,
)?;
let (cmW_x, cmW_y) = point_to_nonnative_limbs_custom_opt::<C1>(
running_instance.cmW,
OptimizationType::Constraints,
)?;
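// Note: the ordering of this public input vector is assumed to match the order in which
// the DeciderEthCircuit allocates its public inputs (i, z_0, z_i, then the running
// instance's u, x and the nonnative cmE/cmW limbs); any mismatch would make `S::verify`
// fail.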
let public_input: Vec<C1::ScalarField> = vec![
vec![i],
z_0,
z_i,
vec![running_instance.u],
running_instance.x.clone(),
cmE_x,
cmE_y,
cmW_x,
cmW_y,
]
.concat();
S::verify(vp, &public_input, &proof).map_err(|e| Error::Other(e.to_string()))
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_groth16::Groth16;
use ark_mnt4_298::{constraints::G1Var as GVar, Fr, G1Projective as Projective, MNT4_298};
use ark_mnt6_298::{constraints::G1Var as GVar2, G1Projective as Projective2};
use std::time::Instant;
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::{get_pedersen_params_len, ProverParams};
use crate::frontend::tests::CubicFCircuit;
use crate::transcript::poseidon::poseidon_test_config;
// Note: since we're testing a big circuit, this test takes more computation and time than
// usual, so it is not run in the normal CI.
// To run it, use the `--ignored` flag, e.g. `cargo test -- --ignored`
#[test]
#[ignore]
fn test_decider() {
type NOVA = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
>;
type DECIDER = Decider<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
Groth16<MNT4_298>, // here we define the Snark to use in the decider
NOVA, // here we define the FoldingScheme to use
>;
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(());
let z_0 = vec![Fr::from(3_u32)];
let (cm_len, cf_cm_len) =
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
&poseidon_config,
F_circuit,
)
.unwrap();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
let start = Instant::now();
let prover_params =
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
poseidon_config: poseidon_config.clone(),
cm_params: pedersen_params,
cf_cm_params: cf_pedersen_params,
};
println!("generating pedersen params, {:?}", start.elapsed());
// use Nova as FoldingScheme
let start = Instant::now();
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
println!("Nova initialized, {:?}", start.elapsed());
let start = Instant::now();
nova.prove_step().unwrap();
println!("prove_step, {:?}", start.elapsed());
// generate Groth16 setup
let circuit = DeciderEthCircuit::<
Projective,
GVar,
Projective2,
GVar2,
Pedersen<Projective>,
Pedersen<Projective2>,
>::from_nova::<CubicFCircuit<Fr>>(nova.clone());
let mut rng = rand::rngs::OsRng;
let start = Instant::now();
let (pk, vk) =
Groth16::<MNT4_298>::circuit_specific_setup(circuit.clone(), &mut rng).unwrap();
println!("Groth16 setup, {:?}", start.elapsed());
// decider proof generation
let start = Instant::now();
let proof = DECIDER::prove(&pk, rng, nova.clone()).unwrap();
println!("Decider Groth16 prove, {:?}", start.elapsed());
// decider proof verification
let verified = DECIDER::verify(&vk, nova.i, nova.z_0, nova.z_i, &nova.U_i, proof).unwrap();
assert!(verified);
}
}

@@ -0,0 +1,686 @@
//! This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use
//! cases, other more efficient approaches can be used.
use ark_crypto_primitives::crh::poseidon::constraints::CRHParametersVar;
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::{One, Zero};
use core::{borrow::Borrow, marker::PhantomData};
use crate::ccs::r1cs::R1CS;
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentProver};
use crate::folding::nova::{
circuits::{CommittedInstanceVar, CF1, CF2},
CommittedInstance, Nova, Witness,
};
use crate::frontend::FCircuit;
use crate::utils::gadgets::{
hadamard, mat_vec_mul_sparse, vec_add, vec_scalar_mul, SparseMatrixVar,
};
#[derive(Debug, Clone)]
pub struct RelaxedR1CSGadget<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
_f: PhantomData<F>,
_cf: PhantomData<CF>,
_fv: PhantomData<FV>,
}
impl<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> RelaxedR1CSGadget<F, CF, FV> {
/// performs the RelaxedR1CS check (Az∘Bz==uCz+E)
pub fn check(
r1cs: R1CSVar<F, CF, FV>,
E: Vec<FV>,
u: FV,
z: Vec<FV>,
) -> Result<(), SynthesisError> {
let Az = mat_vec_mul_sparse(r1cs.A, z.clone());
let Bz = mat_vec_mul_sparse(r1cs.B, z.clone());
let Cz = mat_vec_mul_sparse(r1cs.C, z.clone());
let uCz = vec_scalar_mul(&Cz, &u);
let uCzE = vec_add(&uCz, &E)?;
let AzBz = hadamard(&Az, &Bz)?;
for i in 0..AzBz.len() {
AzBz[i].enforce_equal(&uCzE[i].clone())?;
}
Ok(())
}
}
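// For reference: a plain R1CS instance is the special case of the relaxed relation with u = 1
// and E = 0, in which case `check` degenerates to the usual Az∘Bz == Cz test; this is what the
// tests below exercise via `r1cs.relax()`.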
#[derive(Debug, Clone)]
pub struct R1CSVar<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
_f: PhantomData<F>,
_cf: PhantomData<CF>,
_fv: PhantomData<FV>,
pub A: SparseMatrixVar<F, CF, FV>,
pub B: SparseMatrixVar<F, CF, FV>,
pub C: SparseMatrixVar<F, CF, FV>,
}
impl<F, CF, FV> AllocVar<R1CS<F>, CF> for R1CSVar<F, CF, FV>
where
F: PrimeField,
CF: PrimeField,
FV: FieldVar<F, CF>,
{
fn new_variable<T: Borrow<R1CS<F>>>(
cs: impl Into<Namespace<CF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let A = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().A)?;
let B = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().B)?;
let C = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().C)?;
Ok(Self {
_f: PhantomData,
_cf: PhantomData,
_fv: PhantomData,
A,
B,
C,
})
})
}
}
/// In-circuit representation of the Witness associated to the CommittedInstance.
#[derive(Debug, Clone)]
pub struct WitnessVar<C: CurveGroup> {
pub E: Vec<FpVar<C::ScalarField>>,
pub rE: FpVar<C::ScalarField>,
pub W: Vec<FpVar<C::ScalarField>>,
pub rW: FpVar<C::ScalarField>,
}
impl<C> AllocVar<Witness<C>, CF1<C>> for WitnessVar<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
{
fn new_variable<T: Borrow<Witness<C>>>(
cs: impl Into<Namespace<CF1<C>>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let E: Vec<FpVar<C::ScalarField>> =
Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?;
let rE =
FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().rE), mode)?;
let W: Vec<FpVar<C::ScalarField>> =
Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?;
let rW =
FpVar::<C::ScalarField>::new_variable(cs.clone(), || Ok(val.borrow().rW), mode)?;
Ok(Self { E, rE, W, rW })
})
}
}
/// In-circuit representation of the Witness associated to the CommittedInstance, but with
/// non-native representation, since it is used to represent the CycleFold witness.
#[derive(Debug, Clone)]
pub struct CycleFoldWitnessVar<C: CurveGroup> {
pub E: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
pub rE: NonNativeFieldVar<C::ScalarField, CF2<C>>,
pub W: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>>,
pub rW: NonNativeFieldVar<C::ScalarField, CF2<C>>,
}
impl<C> AllocVar<Witness<C>, CF2<C>> for CycleFoldWitnessVar<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
{
fn new_variable<T: Borrow<Witness<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let E: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> =
Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?;
let rE = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
cs.clone(),
|| Ok(val.borrow().rE),
mode,
)?;
let W: Vec<NonNativeFieldVar<C::ScalarField, CF2<C>>> =
Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?;
let rW = NonNativeFieldVar::<C::ScalarField, CF2<C>>::new_variable(
cs.clone(),
|| Ok(val.borrow().rW),
mode,
)?;
Ok(Self { E, rE, W, rW })
})
}
}
/// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM)
/// verification.
#[derive(Clone, Debug)]
pub struct DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
{
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
_cp1: PhantomData<CP1>,
_cp2: PhantomData<CP2>,
/// E vector's length of the Nova instance witness
pub E_len: usize,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the Augmented Function circuit
pub r1cs: R1CS<C1::ScalarField>,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
/// CycleFold PedersenParams over C2
pub cf_pedersen_params: PedersenParams<C2>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
pub i: Option<CF1<C1>>,
/// initial state
pub z_0: Option<Vec<C1::ScalarField>>,
/// current i-th state
pub z_i: Option<Vec<C1::ScalarField>>,
/// Nova instances
pub u_i: Option<CommittedInstance<C1>>,
pub w_i: Option<Witness<C1>>,
pub U_i: Option<CommittedInstance<C1>>,
pub W_i: Option<Witness<C1>>,
/// CycleFold running instance
pub cf_U_i: Option<CommittedInstance<C2>>,
pub cf_W_i: Option<Witness<C2>>,
}
impl<C1, GC1, C2, GC2, CP1, CP2> DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>>,
CP1: CommitmentProver<C1>,
// enforce that CP2 is a Pedersen commitment, since we're in Ethereum's EVM decider
CP2: CommitmentProver<C2, Params = PedersenParams<C2>>,
{
pub fn from_nova<FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CP1, CP2>,
) -> Self {
Self {
_c1: PhantomData,
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
_cp1: PhantomData,
_cp2: PhantomData,
E_len: nova.W_i.E.len(),
cf_E_len: nova.cf_W_i.E.len(),
r1cs: nova.r1cs,
cf_r1cs: nova.cf_r1cs,
cf_pedersen_params: nova.cf_cm_params,
poseidon_config: nova.poseidon_config,
i: Some(nova.i),
z_0: Some(nova.z_0),
z_i: Some(nova.z_i),
u_i: Some(nova.u_i),
w_i: Some(nova.w_i),
U_i: Some(nova.U_i),
W_i: Some(nova.W_i),
cf_U_i: Some(nova.cf_U_i),
cf_W_i: Some(nova.cf_W_i),
}
}
}
impl<C1, GC1, C2, GC2, CP1, CP2> ConstraintSynthesizer<CF1<C1>>
for DeciderEthCircuit<C1, GC1, C2, GC2, CP1, CP2>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>>,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let r1cs =
R1CSVar::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.r1cs.clone())
})?;
let i =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let u_dummy_native = CommittedInstance::<C1>::dummy(1);
let w_dummy_native = Witness::<C1>::new(
vec![C1::ScalarField::zero(); self.r1cs.A.n_cols - 2 /* (2=1+1, since u_i.x.len=1) */],
self.E_len,
);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let w_i = WitnessVar::<C1>::new_witness(cs.clone(), || {
Ok(self.w_i.unwrap_or(w_dummy_native.clone()))
})?;
let U_i = CommittedInstanceVar::<C1>::new_input(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
let W_i = WitnessVar::<C1>::new_witness(cs.clone(), || {
Ok(self.W_i.unwrap_or(w_dummy_native.clone()))
})?;
let crh_params = CRHParametersVar::<C1::ScalarField>::new_constant(
cs.clone(),
self.poseidon_config.clone(),
)?;
// 1. check RelaxedR1CS of u_i
let z_u: Vec<FpVar<CF1<C1>>> = [
vec![FpVar::<CF1<C1>>::one()],
u_i.x.to_vec(),
w_i.W.to_vec(),
]
.concat();
RelaxedR1CSGadget::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::check(
r1cs.clone(),
w_i.E,
u_i.u.clone(),
z_u,
)?;
// 2. check RelaxedR1CS of U_i
let z_U: Vec<FpVar<CF1<C1>>> =
[vec![U_i.u.clone()], U_i.x.to_vec(), W_i.W.to_vec()].concat();
RelaxedR1CSGadget::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::check(
r1cs,
W_i.E,
U_i.u.clone(),
z_U,
)?;
// 3. u_i.cmE==cm(0), u_i.u==1
// Here zero_x & zero_y are the x & y coordinates of the zero point affine representation.
let zero_x = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
cs.clone(),
C1::BaseField::zero(),
)?
.to_constraint_field()?;
let zero_y = NonNativeFieldVar::<C1::BaseField, C1::ScalarField>::new_constant(
cs.clone(),
C1::BaseField::one(),
)?
.to_constraint_field()?;
(u_i.cmE.x.is_eq(&zero_x)?).enforce_equal(&Boolean::TRUE)?;
(u_i.cmE.y.is_eq(&zero_y)?).enforce_equal(&Boolean::TRUE)?;
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 4. u_i.x == H(i, z_0, z_i, U_i)
let u_i_x = U_i
.clone()
.hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?;
(u_i.x[0]).enforce_equal(&u_i_x)?;
// The following two checks (and their respective allocations) are disabled for normal
// tests, since they take ~24.5M constraints and would need several minutes (and a lot of
// RAM) to run
#[cfg(not(test))]
{
// imports here instead of at the top of the file, so we avoid having multiple
// `#[cfg(not(test))]`
use crate::commitment::pedersen::PedersenGadget;
use crate::folding::nova::cyclefold::{CycleFoldCommittedInstanceVar, CF_IO_LEN};
use ark_r1cs_std::ToBitsGadget;
let cf_r1cs = R1CSVar::<
C1::BaseField,
CF1<C1>,
NonNativeFieldVar<C1::BaseField, CF1<C1>>,
>::new_witness(cs.clone(), || Ok(self.cf_r1cs.clone()))?;
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(CF_IO_LEN);
let w_dummy_native = Witness::<C2>::new(
vec![C2::ScalarField::zero(); self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l],
self.cf_E_len,
);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_W_i = CycleFoldWitnessVar::<C2>::new_witness(cs.clone(), || {
Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone()))
})?;
// 5. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
let cf_W_i_E_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
.E
.iter()
.map(|E_i| E_i.to_bits_le().unwrap())
.collect();
let cf_W_i_W_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
.W
.iter()
.map(|W_i| W_i.to_bits_le().unwrap())
.collect();
let computed_cmE = PedersenGadget::<C2, GC2>::commit(
H.clone(),
G.clone(),
cf_W_i_E_bits,
cf_W_i.rE.to_bits_le()?,
)?;
cf_U_i.cmE.enforce_equal(&computed_cmE)?;
let computed_cmW =
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i_W_bits, cf_W_i.rW.to_bits_le()?)?;
cf_U_i.cmW.enforce_equal(&computed_cmW)?;
// 6. check RelaxedR1CS of cf_U_i
let cf_z_U: Vec<NonNativeFieldVar<C2::ScalarField, CF1<C1>>> =
[vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::<
C2::ScalarField,
CF1<C1>,
NonNativeFieldVar<C2::ScalarField, CF1<C1>>,
>::check(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?;
}
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_crypto_primitives::crh::{
sha256::{
constraints::{Sha256Gadget, UnitVar},
Sha256,
},
CRHScheme, CRHSchemeGadget,
};
use ark_ff::BigInteger;
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::{
alloc::AllocVar,
bits::uint8::UInt8,
eq::EqGadget,
fields::{fp::FpVar, nonnative::NonNativeFieldVar},
};
use ark_relations::r1cs::ConstraintSystem;
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::{get_pedersen_params_len, ProverParams, VerifierParams};
use crate::frontend::tests::{CubicFCircuit, CustomFCircuit, WrapperCircuit};
use crate::transcript::poseidon::poseidon_test_config;
use crate::FoldingScheme;
use crate::ccs::r1cs::{extract_r1cs, extract_w_x};
use crate::ccs::r1cs::{
tests::{get_test_r1cs, get_test_z},
R1CS,
};
#[test]
fn test_relaxed_r1cs_small_gadget_handcrafted() {
let r1cs: R1CS<Fr> = get_test_r1cs();
let rel_r1cs = r1cs.clone().relax();
let z = get_test_z(3);
let cs = ConstraintSystem::<Fr>::new_ref();
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(rel_r1cs.E)).unwrap();
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(rel_r1cs.u)).unwrap();
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
RelaxedR1CSGadget::<Fr, Fr, FpVar<Fr>>::check(r1csVar, EVar, uVar, zVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
// gets as input a circuit that implements the ConstraintSynthesizer trait, and that has been
// initialized.
fn test_relaxed_r1cs_gadget<CS: ConstraintSynthesizer<Fr>>(circuit: CS) {
let cs = ConstraintSystem::<Fr>::new_ref();
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
assert!(cs.is_satisfied().unwrap());
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fr>(&cs);
let (w, x) = extract_w_x::<Fr>(&cs);
let z = [vec![Fr::one()], x, w].concat();
r1cs.check_relation(&z).unwrap();
let relaxed_r1cs = r1cs.clone().relax();
relaxed_r1cs.check_relation(&z).unwrap();
// set new CS for the circuit that checks the RelaxedR1CS of our original circuit
let cs = ConstraintSystem::<Fr>::new_ref();
// prepare the inputs for our circuit
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.E)).unwrap();
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
RelaxedR1CSGadget::<Fr, Fr, FpVar<Fr>>::check(r1csVar, EVar, uVar, zVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
#[test]
fn test_relaxed_r1cs_small_gadget_arkworks() {
let z_i = vec![Fr::from(3_u32)];
let cubic_circuit = CubicFCircuit::<Fr>::new(());
let circuit = WrapperCircuit::<Fr, CubicFCircuit<Fr>> {
FC: cubic_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(cubic_circuit.step_native(z_i).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
struct Sha256TestCircuit<F: PrimeField> {
_f: PhantomData<F>,
pub x: Vec<u8>,
pub y: Vec<u8>,
}
impl<F: PrimeField> ConstraintSynthesizer<F> for Sha256TestCircuit<F> {
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
let x = Vec::<UInt8<F>>::new_witness(cs.clone(), || Ok(self.x))?;
let y = Vec::<UInt8<F>>::new_input(cs.clone(), || Ok(self.y))?;
let unitVar = UnitVar::default();
let comp_y = <Sha256Gadget<F> as CRHSchemeGadget<Sha256, F>>::evaluate(&unitVar, &x)?;
comp_y.0.enforce_equal(&y)?;
Ok(())
}
}
#[test]
fn test_relaxed_r1cs_medium_gadget_arkworks() {
let x = Fr::from(5_u32).into_bigint().to_bytes_le();
let y = <Sha256 as CRHScheme>::evaluate(&(), x.clone()).unwrap();
let circuit = Sha256TestCircuit::<Fr> {
_f: PhantomData,
x,
y,
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_custom_circuit() {
let n_constraints = 10_000;
let custom_circuit = CustomFCircuit::<Fr>::new(n_constraints);
let z_i = vec![Fr::from(5_u32)];
let circuit = WrapperCircuit::<Fr, CustomFCircuit<Fr>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(z_i).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_nonnative_circuit() {
let cs = ConstraintSystem::<Fq>::new_ref();
        // in practice we would use CycleFoldCircuit, but it is a very big circuit (when computed
        // non-natively inside the RelaxedR1CS circuit), so in order to keep the test short we use
        // a custom circuit.
let custom_circuit = CustomFCircuit::<Fq>::new(10);
let z_i = vec![Fq::from(5_u32)];
let circuit = WrapperCircuit::<Fq, CustomFCircuit<Fq>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(z_i).unwrap()),
};
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fq>(&cs);
let (w, x) = extract_w_x::<Fq>(&cs);
let z = [vec![Fq::one()], x, w].concat();
let relaxed_r1cs = r1cs.clone().relax();
// natively
let cs = ConstraintSystem::<Fq>::new_ref();
let zVar = Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(z.clone())).unwrap();
let EVar =
Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.clone().E)).unwrap();
let uVar = FpVar::<Fq>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
let r1csVar =
R1CSVar::<Fq, Fq, FpVar<Fq>>::new_witness(cs.clone(), || Ok(r1cs.clone())).unwrap();
RelaxedR1CSGadget::<Fq, Fq, FpVar<Fq>>::check(r1csVar, EVar, uVar, zVar).unwrap();
// non-natively
let cs = ConstraintSystem::<Fr>::new_ref();
let zVar = Vec::<NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::<NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(relaxed_r1cs.E))
.unwrap();
let uVar =
NonNativeFieldVar::<Fq, Fr>::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap();
let r1csVar =
R1CSVar::<Fq, Fr, NonNativeFieldVar<Fq, Fr>>::new_witness(cs.clone(), || Ok(r1cs))
.unwrap();
RelaxedR1CSGadget::<Fq, Fr, NonNativeFieldVar<Fq, Fr>>::check(r1csVar, EVar, uVar, zVar)
.unwrap();
}
#[test]
fn test_decider_circuit() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(());
let z_0 = vec![Fr::from(3_u32)];
// get the CM & CF_CM len
let (cm_len, cf_cm_len) =
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
&poseidon_config,
F_circuit,
)
.unwrap();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
let prover_params =
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
poseidon_config: poseidon_config.clone(),
cm_params: pedersen_params,
cf_cm_params: cf_pedersen_params,
};
type NOVA = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
>;
// generate a Nova instance and do a step of it
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
nova.prove_step().unwrap();
let ivc_v = nova.clone();
let verifier_params = VerifierParams::<Projective, Projective2> {
poseidon_config: poseidon_config.clone(),
r1cs: ivc_v.r1cs,
cf_r1cs: ivc_v.cf_r1cs,
};
NOVA::verify(
verifier_params,
z_0,
ivc_v.z_i,
Fr::one(),
(ivc_v.U_i, ivc_v.W_i),
(ivc_v.u_i, ivc_v.w_i),
(ivc_v.cf_U_i, ivc_v.cf_W_i),
)
.unwrap();
// load the DeciderEthCircuit from the generated Nova instance
let decider_circuit = DeciderEthCircuit::<
Projective,
GVar,
Projective2,
GVar2,
Pedersen<Projective>,
Pedersen<Projective2>,
>::from_nova(nova);
let cs = ConstraintSystem::<Fr>::new_ref();
        // generate the constraints and check that they are satisfied by the inputs
decider_circuit.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
}


@@ -0,0 +1,717 @@
/// Implements the scheme described in [Nova](https://eprint.iacr.org/2021/370.pdf) and
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf).
use ark_crypto_primitives::{
crh::{poseidon::CRH, CRHScheme},
sponge::{poseidon::PoseidonConfig, Absorb},
};
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar};
use ark_std::fmt::Debug;
use ark_std::{One, Zero};
use core::marker::PhantomData;
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use crate::ccs::r1cs::{extract_r1cs, extract_w_x, R1CS};
use crate::commitment::CommitmentProver;
use crate::folding::circuits::nonnative::point_to_nonnative_limbs;
use crate::frontend::FCircuit;
use crate::utils::vec::is_zero_vec;
use crate::Error;
use crate::FoldingScheme;
pub mod circuits;
pub mod cyclefold;
pub mod decider_eth;
pub mod decider_eth_circuit;
pub mod nifs;
pub mod traits;
use circuits::{AugmentedFCircuit, ChallengeGadget, CF2};
use cyclefold::{CycleFoldChallengeGadget, CycleFoldCircuit};
use nifs::NIFS;
use traits::NovaR1CS;
#[cfg(test)]
use cyclefold::CF_IO_LEN;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CommittedInstance<C: CurveGroup> {
pub cmE: C,
pub u: C::ScalarField,
pub cmW: C,
pub x: Vec<C::ScalarField>,
}
impl<C: CurveGroup> CommittedInstance<C> {
pub fn dummy(io_len: usize) -> Self {
Self {
cmE: C::zero(),
u: C::ScalarField::zero(),
cmW: C::zero(),
x: vec![C::ScalarField::zero(); io_len],
}
}
}
impl<C: CurveGroup> CommittedInstance<C>
where
<C as Group>::ScalarField: Absorb,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
/// hash implements the committed instance hash compatible with the gadget implemented in
/// nova/circuits.rs::CommittedInstanceVar.hash.
    /// Returns `H(i, z_0, z_i, U_i)`, where the index passed can be the current `i` or `i+1`, and
    /// `U_i` is the `CommittedInstance`.
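    /// The Poseidon preimage is the concatenation [i, z_0, z_i, u, x, cmE_x, cmE_y, cmW_x, cmW_y],
    /// with the commitment coordinates encoded as nonnative limbs.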
pub fn hash(
&self,
poseidon_config: &PoseidonConfig<C::ScalarField>,
i: C::ScalarField,
z_0: Vec<C::ScalarField>,
z_i: Vec<C::ScalarField>,
) -> Result<C::ScalarField, Error> {
let (cmE_x, cmE_y) = point_to_nonnative_limbs::<C>(self.cmE)?;
let (cmW_x, cmW_y) = point_to_nonnative_limbs::<C>(self.cmW)?;
CRH::<C::ScalarField>::evaluate(
poseidon_config,
vec![
vec![i],
z_0,
z_i,
vec![self.u],
self.x.clone(),
cmE_x,
cmE_y,
cmW_x,
cmW_y,
]
.concat(),
)
.map_err(|e| Error::Other(e.to_string()))
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Witness<C: CurveGroup> {
pub E: Vec<C::ScalarField>,
pub rE: C::ScalarField,
pub W: Vec<C::ScalarField>,
pub rW: C::ScalarField,
}
impl<C: CurveGroup> Witness<C>
where
<C as Group>::ScalarField: Absorb,
{
pub fn new(w: Vec<C::ScalarField>, e_len: usize) -> Self {
        // note: in the current version we don't use the blinding factors, and we always set them
        // to 0.
Self {
E: vec![C::ScalarField::zero(); e_len],
rE: C::ScalarField::zero(),
W: w,
rW: C::ScalarField::zero(),
}
}
pub fn commit<CP: CommitmentProver<C>>(
&self,
params: &CP::Params,
x: Vec<C::ScalarField>,
) -> Result<CommittedInstance<C>, Error> {
let mut cmE = C::zero();
if !is_zero_vec::<C::ScalarField>(&self.E) {
cmE = CP::commit(params, &self.E, &self.rE)?;
}
let cmW = CP::commit(params, &self.W, &self.rW)?;
Ok(CommittedInstance {
cmE,
u: C::ScalarField::one(),
cmW,
x,
})
}
}
#[derive(Debug, Clone)]
pub struct ProverParams<C1, C2, CP1, CP2>
where
C1: CurveGroup,
C2: CurveGroup,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
{
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
pub cm_params: CP1::Params,
pub cf_cm_params: CP2::Params,
}
#[derive(Debug, Clone)]
pub struct VerifierParams<C1: CurveGroup, C2: CurveGroup> {
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
pub r1cs: R1CS<C1::ScalarField>,
pub cf_r1cs: R1CS<C2::ScalarField>,
}
/// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait
#[derive(Clone, Debug)]
pub struct Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
{
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
/// R1CS of the Augmented Function circuit
pub r1cs: R1CS<C1::ScalarField>,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
pub poseidon_config: PoseidonConfig<C1::ScalarField>,
/// CommitmentProver::Params over C1
pub cm_params: CP1::Params,
/// CycleFold CommitmentProver::Params, over C2
pub cf_cm_params: CP2::Params,
/// F circuit, the circuit that is being folded
pub F: FC,
pub i: C1::ScalarField,
/// initial state
pub z_0: Vec<C1::ScalarField>,
/// current i-th state
pub z_i: Vec<C1::ScalarField>,
/// Nova instances
pub w_i: Witness<C1>,
pub u_i: CommittedInstance<C1>,
pub W_i: Witness<C1>,
pub U_i: CommittedInstance<C1>,
/// CycleFold running instance
pub cf_W_i: Witness<C2>,
pub cf_U_i: CommittedInstance<C2>,
}
impl<C1, GC1, C2, GC2, FC, CP1, CP2> FoldingScheme<C1, C2, FC>
for Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
type PreprocessorParam = (Self::ProverParam, FC);
type ProverParam = ProverParams<C1, C2, CP1, CP2>;
type VerifierParam = VerifierParams<C1, C2>;
type CommittedInstanceWithWitness = (CommittedInstance<C1>, Witness<C1>);
type CFCommittedInstanceWithWitness = (CommittedInstance<C2>, Witness<C2>);
fn preprocess(
prep_param: &Self::PreprocessorParam,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
let (prover_params, F_circuit) = prep_param;
let (r1cs, cf_r1cs) =
get_r1cs::<C1, GC1, C2, GC2, FC>(&prover_params.poseidon_config, *F_circuit)?;
let verifier_params = VerifierParams::<C1, C2> {
poseidon_config: prover_params.poseidon_config.clone(),
r1cs,
cf_r1cs,
};
Ok((prover_params.clone(), verifier_params))
}
/// Initializes the Nova+CycleFold's IVC for the given parameters and initial state `z_0`.
fn init(pp: &Self::ProverParam, F: FC, z_0: Vec<C1::ScalarField>) -> Result<Self, Error> {
// prepare the circuit to obtain its R1CS
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&pp.poseidon_config, F);
let cf_circuit = CycleFoldCircuit::<C1, GC1>::empty();
augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
// setup the dummy instances
let (w_dummy, u_dummy) = r1cs.dummy_instance();
let (cf_w_dummy, cf_u_dummy) = cf_r1cs.dummy_instance();
// W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the
// R1CS that we're working with.
Ok(Self {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
r1cs,
cf_r1cs,
poseidon_config: pp.poseidon_config.clone(),
cm_params: pp.cm_params.clone(),
cf_cm_params: pp.cf_cm_params.clone(),
F,
i: C1::ScalarField::zero(),
z_0: z_0.clone(),
z_i: z_0,
w_i: w_dummy.clone(),
u_i: u_dummy.clone(),
W_i: w_dummy,
U_i: u_dummy,
// cyclefold running instance
cf_W_i: cf_w_dummy.clone(),
cf_U_i: cf_u_dummy.clone(),
})
}
/// Implements IVC.P of Nova+CycleFold
fn prove_step(&mut self) -> Result<(), Error> {
let augmented_F_circuit: AugmentedFCircuit<C1, C2, GC2, FC>;
let cf_circuit: CycleFoldCircuit<C1, GC1>;
let z_i1 = self.F.step_native(self.z_i.clone())?;
// compute T and cmT for AugmentedFCircuit
let (T, cmT) = self.compute_cmT()?;
let r_bits = ChallengeGadget::<C1>::get_challenge_native(
&self.poseidon_config,
self.u_i.clone(),
self.U_i.clone(),
cmT,
)?;
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?;
// fold Nova instances
let (W_i1, U_i1): (Witness<C1>, CommittedInstance<C1>) = NIFS::<C1, CP1>::fold_instances(
r_Fr, &self.w_i, &self.u_i, &self.W_i, &self.U_i, &T, cmT,
)?;
// folded instance output (public input, x)
// u_{i+1}.x = H(i+1, z_0, z_{i+1}, U_{i+1})
let u_i1_x = U_i1.hash(
&self.poseidon_config,
self.i + C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
)?;
if self.i == C1::ScalarField::zero() {
// base case
augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC> {
_gc2: PhantomData,
poseidon_config: self.poseidon_config.clone(),
i: Some(C1::ScalarField::zero()), // = i=0
z_0: Some(self.z_0.clone()), // = z_i
z_i: Some(self.z_i.clone()),
u_i: Some(self.u_i.clone()), // = dummy
U_i: Some(self.U_i.clone()), // = dummy
U_i1: Some(U_i1.clone()),
cmT: Some(cmT),
F: self.F,
x: Some(u_i1_x),
cf_u_i: None,
cf_U_i: None,
cf_U_i1: None,
cf_cmT: None,
cf_r_nonnat: None,
};
#[cfg(test)]
NIFS::<C1, CP1>::verify_folded_instance(r_Fr, &self.u_i, &self.U_i, &U_i1, &cmT)?;
} else {
// CycleFold part:
// get the vector used as public inputs 'x' in the CycleFold circuit
let cf_u_i_x = [
get_committed_instance_coordinates(&self.u_i),
get_committed_instance_coordinates(&self.U_i),
get_committed_instance_coordinates(&U_i1),
]
.concat();
cf_circuit = CycleFoldCircuit::<C1, GC1> {
_gc: PhantomData,
r_bits: Some(r_bits.clone()),
cmT: Some(cmT),
u_i: Some(self.u_i.clone()),
U_i: Some(self.U_i.clone()),
U_i1: Some(U_i1.clone()),
x: Some(cf_u_i_x.clone()),
};
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
cf_circuit.generate_constraints(cs2.clone())?;
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let (cf_w_i, cf_x_i) = extract_w_x::<C1::BaseField>(&cs2);
if cf_x_i != cf_u_i_x {
return Err(Error::NotEqual);
}
#[cfg(test)]
if cf_x_i.len() != CF_IO_LEN {
return Err(Error::NotExpectedLength(cf_x_i.len(), CF_IO_LEN));
}
// fold cyclefold instances
let cf_w_i = Witness::<C2>::new(cf_w_i.clone(), self.cf_r1cs.A.n_rows);
let cf_u_i: CommittedInstance<C2> =
cf_w_i.commit::<CP2>(&self.cf_cm_params, cf_x_i.clone())?;
// compute T* and cmT* for CycleFoldCircuit
let (cf_T, cf_cmT) = self.compute_cf_cmT(&cf_w_i, &cf_u_i)?;
let cf_r_bits = CycleFoldChallengeGadget::<C2, GC2>::get_challenge_native(
&self.poseidon_config,
cf_u_i.clone(),
self.cf_U_i.clone(),
cf_cmT,
)?;
let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits))
.ok_or(Error::OutOfBounds)?;
let (cf_W_i1, cf_U_i1) = NIFS::<C2, CP2>::fold_instances(
cf_r_Fq,
&self.cf_W_i,
&self.cf_U_i,
&cf_w_i,
&cf_u_i,
&cf_T,
cf_cmT,
)?;
augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC> {
_gc2: PhantomData,
poseidon_config: self.poseidon_config.clone(),
i: Some(self.i),
z_0: Some(self.z_0.clone()),
z_i: Some(self.z_i.clone()),
u_i: Some(self.u_i.clone()),
U_i: Some(self.U_i.clone()),
U_i1: Some(U_i1.clone()),
cmT: Some(cmT),
F: self.F,
x: Some(u_i1_x),
// cyclefold values
cf_u_i: Some(cf_u_i.clone()),
cf_U_i: Some(self.cf_U_i.clone()),
cf_U_i1: Some(cf_U_i1.clone()),
cf_cmT: Some(cf_cmT),
cf_r_nonnat: Some(cf_r_Fq),
};
self.cf_W_i = cf_W_i1.clone();
self.cf_U_i = cf_U_i1.clone();
#[cfg(test)]
{
self.cf_r1cs.check_instance_relation(&cf_w_i, &cf_u_i)?;
self.cf_r1cs
.check_relaxed_instance_relation(&self.cf_W_i, &self.cf_U_i)?;
}
}
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
augmented_F_circuit.generate_constraints(cs.clone())?;
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let (w_i1, x_i1) = extract_w_x::<C1::ScalarField>(&cs);
if x_i1[0] != u_i1_x {
return Err(Error::NotEqual);
}
#[cfg(test)]
if x_i1.len() != 1 {
return Err(Error::NotExpectedLength(x_i1.len(), 1));
}
// set values for next iteration
self.i += C1::ScalarField::one();
self.z_i = z_i1.clone();
self.w_i = Witness::<C1>::new(w_i1, self.r1cs.A.n_rows);
self.u_i = self.w_i.commit::<CP1>(&self.cm_params, vec![u_i1_x])?;
self.W_i = W_i1.clone();
self.U_i = U_i1.clone();
#[cfg(test)]
{
self.r1cs.check_instance_relation(&self.w_i, &self.u_i)?;
self.r1cs
.check_relaxed_instance_relation(&self.W_i, &self.U_i)?;
}
Ok(())
}
fn state(&self) -> Vec<C1::ScalarField> {
self.z_i.clone()
}
fn instances(
&self,
) -> (
Self::CommittedInstanceWithWitness,
Self::CommittedInstanceWithWitness,
Self::CFCommittedInstanceWithWitness,
) {
(
(self.U_i.clone(), self.W_i.clone()),
(self.u_i.clone(), self.w_i.clone()),
(self.cf_U_i.clone(), self.cf_W_i.clone()),
)
}
/// Implements IVC.V of Nova+CycleFold
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
num_steps: C1::ScalarField,
running_instance: Self::CommittedInstanceWithWitness,
        incoming_instance: Self::CommittedInstanceWithWitness,
cyclefold_instance: Self::CFCommittedInstanceWithWitness,
) -> Result<(), Error> {
let (U_i, W_i) = running_instance;
        let (u_i, w_i) = incoming_instance;
let (cf_U_i, cf_W_i) = cyclefold_instance;
if u_i.x.len() != 1 || U_i.x.len() != 1 {
return Err(Error::IVCVerificationFail);
}
// check that u_i's output points to the running instance
// u_i.X == H(i, z_0, z_i, U_i)
let expected_u_i_x = U_i.hash(&vp.poseidon_config, num_steps, z_0, z_i.clone())?;
if expected_u_i_x != u_i.x[0] {
return Err(Error::IVCVerificationFail);
}
        // check u_i.cmE==0, u_i.u==1 (i.e. u_i is an un-relaxed instance)
if !u_i.cmE.is_zero() || !u_i.u.is_one() {
return Err(Error::IVCVerificationFail);
}
// check R1CS satisfiability
vp.r1cs.check_instance_relation(&w_i, &u_i)?;
// check RelaxedR1CS satisfiability
vp.r1cs.check_relaxed_instance_relation(&W_i, &U_i)?;
// check CycleFold RelaxedR1CS satisfiability
vp.cf_r1cs
.check_relaxed_instance_relation(&cf_W_i, &cf_U_i)?;
Ok(())
}
}
impl<C1, GC1, C2, GC2, FC, CP1, CP2> Nova<C1, GC1, C2, GC2, FC, CP1, CP2>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CP1: CommitmentProver<C1>,
CP2: CommitmentProver<C2>,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
{
// computes T and cmT for the AugmentedFCircuit
fn compute_cmT(&self) -> Result<(Vec<C1::ScalarField>, C1), Error> {
NIFS::<C1, CP1>::compute_cmT(
&self.cm_params,
&self.r1cs,
&self.w_i,
&self.u_i,
&self.W_i,
&self.U_i,
)
}
// computes T* and cmT* for the CycleFoldCircuit
fn compute_cf_cmT(
&self,
cf_w_i: &Witness<C2>,
cf_u_i: &CommittedInstance<C2>,
) -> Result<(Vec<C2::ScalarField>, C2), Error> {
NIFS::<C2, CP2>::compute_cyclefold_cmT(
&self.cf_cm_params,
&self.cf_r1cs,
cf_w_i,
cf_u_i,
&self.cf_W_i,
&self.cf_U_i,
)
}
}
/// helper method to get the r1cs from the ConstraintSynthesizer
pub fn get_r1cs_from_cs<F: PrimeField>(
circuit: impl ConstraintSynthesizer<F>,
) -> Result<R1CS<F>, Error> {
let cs = ConstraintSystem::<F>::new_ref();
circuit.generate_constraints(cs.clone())?;
cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<F>(&cs);
Ok(r1cs)
}
/// helper method to get the R1CS for both the AugmentedFCircuit and the CycleFold circuit
#[allow(clippy::type_complexity)]
pub fn get_r1cs<C1, GC1, C2, GC2, FC>(
poseidon_config: &PoseidonConfig<C1::ScalarField>,
F_circuit: FC,
) -> Result<(R1CS<C1::ScalarField>, R1CS<C2::ScalarField>), Error>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(poseidon_config, F_circuit);
let cf_circuit = CycleFoldCircuit::<C1, GC1>::empty();
let r1cs = get_r1cs_from_cs::<C1::ScalarField>(augmented_F_circuit)?;
let cf_r1cs = get_r1cs_from_cs::<C2::ScalarField>(cf_circuit)?;
Ok((r1cs, cf_r1cs))
}
/// helper method to get the pedersen params length for both the AugmentedFCircuit and the
/// CycleFold circuit
pub fn get_pedersen_params_len<C1, GC1, C2, GC2, FC>(
poseidon_config: &PoseidonConfig<C1::ScalarField>,
F_circuit: FC,
) -> Result<(usize, usize), Error>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
let (r1cs, cf_r1cs) = get_r1cs::<C1, GC1, C2, GC2, FC>(poseidon_config, F_circuit)?;
Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows))
}
pub(crate) fn get_committed_instance_coordinates<C: CurveGroup>(
u: &CommittedInstance<C>,
) -> Vec<C::BaseField> {
let zero = (&C::BaseField::zero(), &C::BaseField::one());
let cmE = u.cmE.into_affine();
let (cmE_x, cmE_y) = cmE.xy().unwrap_or(zero);
let cmW = u.cmW.into_affine();
let (cmW_x, cmW_y) = cmW.xy().unwrap_or(zero);
vec![*cmE_x, *cmE_y, *cmW_x, *cmW_y]
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::{constraints::GVar, Fr, Projective};
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use crate::commitment::pedersen::Pedersen;
use crate::frontend::tests::CubicFCircuit;
use crate::transcript::poseidon::poseidon_test_config;
#[test]
fn test_ivc() {
type NOVA = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
>;
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_test_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(());
let z_0 = vec![Fr::from(3_u32)];
let (cm_len, cf_cm_len) =
get_pedersen_params_len::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>>(
&poseidon_config,
F_circuit,
)
.unwrap();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, cm_len);
let cf_pedersen_params = Pedersen::<Projective2>::new_params(&mut rng, cf_cm_len);
let prover_params =
ProverParams::<Projective, Projective2, Pedersen<Projective>, Pedersen<Projective2>> {
poseidon_config: poseidon_config.clone(),
cm_params: pedersen_params,
cf_cm_params: cf_pedersen_params,
};
let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap();
let num_steps: usize = 3;
for _ in 0..num_steps {
nova.prove_step().unwrap();
}
assert_eq!(Fr::from(num_steps as u32), nova.i);
let verifier_params = VerifierParams::<Projective, Projective2> {
poseidon_config,
r1cs: nova.r1cs,
cf_r1cs: nova.cf_r1cs,
};
NOVA::verify(
verifier_params,
z_0,
nova.z_i,
nova.i,
(nova.U_i, nova.W_i),
(nova.u_i, nova.w_i),
(nova.cf_U_i, nova.cf_W_i),
)
.unwrap();
}
}


@@ -0,0 +1,481 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_std::One;
use std::marker::PhantomData;
use super::{CommittedInstance, Witness};
use crate::ccs::r1cs::R1CS;
use crate::commitment::CommitmentProver;
use crate::transcript::Transcript;
use crate::utils::vec::*;
use crate::Error;
/// Implements the Non-Interactive Folding Scheme described in section 4 of
/// [Nova](https://eprint.iacr.org/2021/370.pdf)
pub struct NIFS<C: CurveGroup, CP: CommitmentProver<C>> {
_c: PhantomData<C>,
_cp: PhantomData<CP>,
}
impl<C: CurveGroup, CP: CommitmentProver<C>> NIFS<C, CP>
where
<C as Group>::ScalarField: Absorb,
{
// compute_T: compute cross-terms T
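    // For full assignments z1 = (u1, x1, W1) and z2 = (u2, x2, W2), the cross-term is
    //   T = Az1 ∘ Bz2 + Az2 ∘ Bz1 - u1·(Cz2) - u2·(Cz1),
    // so that the folded error vector becomes E = E1 + r·T + r²·E2 (see fold_witness below).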
pub fn compute_T(
r1cs: &R1CS<C::ScalarField>,
u1: C::ScalarField,
u2: C::ScalarField,
z1: &[C::ScalarField],
z2: &[C::ScalarField],
) -> Result<Vec<C::ScalarField>, Error> {
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
// this is parallelizable (for the future)
let Az1 = mat_vec_mul_sparse(&A, z1)?;
let Bz1 = mat_vec_mul_sparse(&B, z1)?;
let Cz1 = mat_vec_mul_sparse(&C, z1)?;
let Az2 = mat_vec_mul_sparse(&A, z2)?;
let Bz2 = mat_vec_mul_sparse(&B, z2)?;
let Cz2 = mat_vec_mul_sparse(&C, z2)?;
let Az1_Bz2 = hadamard(&Az1, &Bz2)?;
let Az2_Bz1 = hadamard(&Az2, &Bz1)?;
let u1Cz2 = vec_scalar_mul(&Cz2, &u1);
let u2Cz1 = vec_scalar_mul(&Cz1, &u2);
vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1)
}
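    /// fold_witness combines two witnesses into one using the folding challenge `r`:
    ///   E = E1 + r·T + r²·E2,  W = W1 + r·W2  (and analogously for the blindings rE, rW)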
pub fn fold_witness(
r: C::ScalarField,
w1: &Witness<C>,
w2: &Witness<C>,
T: &[C::ScalarField],
rT: C::ScalarField,
) -> Result<Witness<C>, Error> {
let r2 = r * r;
let E: Vec<C::ScalarField> = vec_add(
&vec_add(&w1.E, &vec_scalar_mul(T, &r))?,
&vec_scalar_mul(&w2.E, &r2),
)?;
let rE = w1.rE + r * rT + r2 * w2.rE;
let W: Vec<C::ScalarField> = w1.W.iter().zip(&w2.W).map(|(a, b)| *a + (r * b)).collect();
let rW = w1.rW + r * w2.rW;
Ok(Witness::<C> { E, rE, W, rW })
}
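    /// fold_committed_instance is the homomorphic counterpart of fold_witness:
    ///   cmE = cmE1 + r·cmT + r²·cmE2,  u = u1 + r·u2,  cmW = cmW1 + r·cmW2,  x = x1 + r·x2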
pub fn fold_committed_instance(
r: C::ScalarField,
ci1: &CommittedInstance<C>,
ci2: &CommittedInstance<C>,
cmT: &C,
) -> CommittedInstance<C> {
let r2 = r * r;
let cmE = ci1.cmE + cmT.mul(r) + ci2.cmE.mul(r2);
let u = ci1.u + r * ci2.u;
let cmW = ci1.cmW + ci2.cmW.mul(r);
let x = ci1
.x
.iter()
.zip(&ci2.x)
.map(|(a, b)| *a + (r * b))
.collect::<Vec<C::ScalarField>>();
CommittedInstance::<C> { cmE, u, cmW, x }
}
/// NIFS.P is the consecutive combination of compute_cmT with fold_instances
/// compute_cmT is part of the NIFS.P logic
pub fn compute_cmT(
cm_prover_params: &CP::Params,
r1cs: &R1CS<C::ScalarField>,
w1: &Witness<C>,
ci1: &CommittedInstance<C>,
w2: &Witness<C>,
ci2: &CommittedInstance<C>,
) -> Result<(Vec<C::ScalarField>, C), Error> {
let z1: Vec<C::ScalarField> = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat();
let z2: Vec<C::ScalarField> = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat();
// compute cross terms
let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?;
        // use r_T=1 since we don't need the hiding property for cm(T)
let cmT = CP::commit(cm_prover_params, &T, &C::ScalarField::one())?;
Ok((T, cmT))
}
pub fn compute_cyclefold_cmT(
cm_prover_params: &CP::Params,
r1cs: &R1CS<C::ScalarField>, // R1CS over C2.Fr=C1.Fq (here C=C2)
w1: &Witness<C>,
ci1: &CommittedInstance<C>,
w2: &Witness<C>,
ci2: &CommittedInstance<C>,
) -> Result<(Vec<C::ScalarField>, C), Error>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
let z1: Vec<C::ScalarField> = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat();
let z2: Vec<C::ScalarField> = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat();
// compute cross terms
let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?;
        // use r_T=1 since we don't need the hiding property for cm(T)
let cmT = CP::commit(cm_prover_params, &T, &C::ScalarField::one())?;
Ok((T, cmT))
}
    /// fold_instances is part of the NIFS.P logic described in
    /// [Nova](https://eprint.iacr.org/2021/370.pdf)'s section 4. It returns the folded
    /// CommittedInstance and the folded Witness.
pub fn fold_instances(
r: C::ScalarField,
w1: &Witness<C>,
ci1: &CommittedInstance<C>,
w2: &Witness<C>,
ci2: &CommittedInstance<C>,
T: &[C::ScalarField],
cmT: C,
) -> Result<(Witness<C>, CommittedInstance<C>), Error> {
// fold witness
        // use r_T=1 since we don't need the hiding property for cm(T)
let w3 = NIFS::<C, CP>::fold_witness(r, w1, w2, T, C::ScalarField::one())?;
        // fold committed instances
let ci3 = NIFS::<C, CP>::fold_committed_instance(r, ci1, ci2, &cmT);
Ok((w3, ci3))
}
/// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s
/// section 4. It returns the folded Committed Instance
pub fn verify(
        // r comes from the transcript, and is an n-bit (N_BITS_CHALLENGE) element
r: C::ScalarField,
ci1: &CommittedInstance<C>,
ci2: &CommittedInstance<C>,
cmT: &C,
) -> CommittedInstance<C> {
NIFS::<C, CP>::fold_committed_instance(r, ci1, ci2, cmT)
}
    /// Verify committed folded instance (ci) relations. Notice that this method does not open the
/// commitments, but just checks that the given committed instances (ci1, ci2) when folded
/// result in the folded committed instance (ci3) values.
pub fn verify_folded_instance(
r: C::ScalarField,
ci1: &CommittedInstance<C>,
ci2: &CommittedInstance<C>,
ci3: &CommittedInstance<C>,
cmT: &C,
) -> Result<(), Error> {
let expected = Self::fold_committed_instance(r, ci1, ci2, cmT);
if ci3.cmE != expected.cmE
|| ci3.u != expected.u
|| ci3.cmW != expected.cmW
|| ci3.x != expected.x
{
return Err(Error::NotSatisfied);
}
Ok(())
}
pub fn prove_commitments(
tr: &mut impl Transcript<C>,
cm_prover_params: &CP::Params,
w: &Witness<C>,
ci: &CommittedInstance<C>,
T: Vec<C::ScalarField>,
cmT: &C,
) -> Result<[CP::Proof; 3], Error> {
let cmE_proof = CP::prove(cm_prover_params, tr, &ci.cmE, &w.E, &w.rE)?;
let cmW_proof = CP::prove(cm_prover_params, tr, &ci.cmW, &w.W, &w.rW)?;
let cmT_proof = CP::prove(cm_prover_params, tr, cmT, &T, &C::ScalarField::one())?; // cm(T) is committed with rT=1
Ok([cmE_proof, cmW_proof, cmT_proof])
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_ff::{BigInteger, PrimeField};
use ark_pallas::{Fr, Projective};
use ark_std::{ops::Mul, UniformRand, Zero};
use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z};
use crate::commitment::pedersen::{Params as PedersenParams, Pedersen};
use crate::folding::nova::circuits::ChallengeGadget;
use crate::folding::nova::traits::NovaR1CS;
use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};
use crate::utils::vec::vec_scalar_mul;
#[allow(clippy::type_complexity)]
pub(crate) fn prepare_simple_fold_inputs() -> (
PedersenParams<Projective>,
PoseidonConfig<Fr>,
R1CS<Fr>,
Witness<Projective>, // w1
CommittedInstance<Projective>, // ci1
Witness<Projective>, // w2
CommittedInstance<Projective>, // ci2
Witness<Projective>, // w3
CommittedInstance<Projective>, // ci3
Vec<Fr>, // T
Projective, // cmT
Vec<bool>, // r_bits
Fr, // r_Fr
) {
let r1cs = get_test_r1cs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let (w1, x1) = r1cs.split_z(&z1);
let (w2, x2) = r1cs.split_z(&z2);
let w1 = Witness::<Projective>::new(w1.clone(), r1cs.A.n_rows);
let w2 = Witness::<Projective>::new(w2.clone(), r1cs.A.n_rows);
let mut rng = ark_std::test_rng();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
// compute committed instances
let ci1 = w1
.commit::<Pedersen<Projective>>(&pedersen_params, x1.clone())
.unwrap();
let ci2 = w2
.commit::<Pedersen<Projective>>(&pedersen_params, x2.clone())
.unwrap();
// NIFS.P
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
&pedersen_params,
&r1cs,
&w1,
&ci1,
&w2,
&ci2,
)
.unwrap();
let poseidon_config = poseidon_test_config::<Fr>();
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
&poseidon_config,
ci1.clone(),
ci2.clone(),
cmT,
)
.unwrap();
let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
let (w3, ci3) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
r_Fr, &w1, &ci1, &w2, &ci2, &T, cmT,
)
.unwrap();
(
pedersen_params,
poseidon_config,
r1cs,
w1,
ci1,
w2,
ci2,
w3,
ci3,
T,
cmT,
r_bits,
r_Fr,
)
}
// fold 2 dummy instances and check that the folded instance holds the relaxed R1CS relation
#[test]
fn test_nifs_fold_dummy() {
let r1cs = get_test_r1cs::<Fr>();
let z1 = get_test_z(3);
let (w1, x1) = r1cs.split_z(&z1);
let mut rng = ark_std::test_rng();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
        // dummy instance: witness and public inputs are all zeroes
let w_dummy = Witness::<Projective>::new(vec![Fr::zero(); w1.len()], r1cs.A.n_rows);
let mut u_dummy = w_dummy
.commit::<Pedersen<Projective>>(&pedersen_params, vec![Fr::zero(); x1.len()])
.unwrap();
u_dummy.u = Fr::zero();
let w_i = w_dummy.clone();
let u_i = u_dummy.clone();
let W_i = w_dummy.clone();
let U_i = u_dummy.clone();
r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap();
r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap();
let r_Fr = Fr::from(3_u32);
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
&pedersen_params,
&r1cs,
&w_i,
&u_i,
&W_i,
&U_i,
)
.unwrap();
let (W_i1, U_i1) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT,
)
.unwrap();
r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap();
}
// fold 2 instances into one
#[test]
fn test_nifs_one_fold() {
let (pedersen_params, poseidon_config, r1cs, w1, ci1, w2, ci2, w3, ci3, T, cmT, _, r) =
prepare_simple_fold_inputs();
// NIFS.V
let ci3_v = NIFS::<Projective, Pedersen<Projective>>::verify(r, &ci1, &ci2, &cmT);
assert_eq!(ci3_v, ci3);
        // check that the relations hold for the 2 input instances and the folded one
r1cs.check_relaxed_instance_relation(&w1, &ci1).unwrap();
r1cs.check_relaxed_instance_relation(&w2, &ci2).unwrap();
r1cs.check_relaxed_instance_relation(&w3, &ci3).unwrap();
        // check that the commitments in the folded instance (ci3) are equal to the commitments
        // obtained by committing the folded witness w3 (using the folded rE, rW)
let ci3_expected = w3
.commit::<Pedersen<Projective>>(&pedersen_params, ci3.x.clone())
.unwrap();
assert_eq!(ci3_expected.cmE, ci3.cmE);
assert_eq!(ci3_expected.cmW, ci3.cmW);
        // the next equalities should hold since we started from two instances with zero-vector
        // E's (hence cmE = 0)
assert_eq!(ci3.cmE, cmT.mul(r));
assert_eq!(w3.E, vec_scalar_mul(&T, &r));
// NIFS.Verify_Folded_Instance:
NIFS::<Projective, Pedersen<Projective>>::verify_folded_instance(r, &ci1, &ci2, &ci3, &cmT)
.unwrap();
// init Prover's transcript
let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
// init Verifier's transcript
let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
// prove the ci3.cmE, ci3.cmW, cmT commitments
let cm_proofs = NIFS::<Projective, Pedersen<Projective>>::prove_commitments(
&mut transcript_p,
&pedersen_params,
&w3,
&ci3,
T,
&cmT,
)
.unwrap();
// verify the ci3.cmE, ci3.cmW, cmT commitments
assert_eq!(cm_proofs.len(), 3);
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
ci3.cmE,
cm_proofs[0].clone(),
)
.unwrap();
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
ci3.cmW,
cm_proofs[1].clone(),
)
.unwrap();
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
cmT,
cm_proofs[2].clone(),
)
.unwrap();
}
#[test]
fn test_nifs_fold_loop() {
let r1cs = get_test_r1cs();
let z = get_test_z(3);
let (w, x) = r1cs.split_z(&z);
let mut rng = ark_std::test_rng();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, r1cs.A.n_cols);
// prepare the running instance
let mut running_instance_w = Witness::<Projective>::new(w.clone(), r1cs.A.n_rows);
let mut running_committed_instance = running_instance_w
.commit::<Pedersen<Projective>>(&pedersen_params, x)
.unwrap();
r1cs.check_relaxed_instance_relation(&running_instance_w, &running_committed_instance)
.unwrap();
let num_iters = 10;
for i in 0..num_iters {
            // prepare the incoming instance
            let incoming_instance_z = get_test_z(i + 4);
            let (w, x) = r1cs.split_z(&incoming_instance_z);
            let incoming_instance_w = Witness::<Projective>::new(w.clone(), r1cs.A.n_rows);
            let incoming_committed_instance = incoming_instance_w
                .commit::<Pedersen<Projective>>(&pedersen_params, x)
                .unwrap();
            r1cs.check_relaxed_instance_relation(
                &incoming_instance_w,
                &incoming_committed_instance,
)
.unwrap();
let r = Fr::rand(&mut rng); // folding challenge would come from the RO
// NIFS.P
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
&pedersen_params,
&r1cs,
&running_instance_w,
&running_committed_instance,
                &incoming_instance_w,
                &incoming_committed_instance,
)
.unwrap();
let (folded_w, _) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
r,
&running_instance_w,
&running_committed_instance,
                &incoming_instance_w,
                &incoming_committed_instance,
&T,
cmT,
)
.unwrap();
// NIFS.V
let folded_committed_instance = NIFS::<Projective, Pedersen<Projective>>::verify(
r,
&running_committed_instance,
                &incoming_committed_instance,
&cmT,
);
r1cs.check_relaxed_instance_relation(&folded_w, &folded_committed_instance)
.unwrap();
// set running_instance for next loop iteration
running_instance_w = folded_w;
running_committed_instance = folded_committed_instance;
}
}
}


@@ -0,0 +1,67 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_std::{One, Zero};
use super::{CommittedInstance, Witness};
use crate::ccs::r1cs::R1CS;
use crate::Error;
/// NovaR1CS extends R1CS methods with Nova specific methods
pub trait NovaR1CS<C: CurveGroup> {
/// returns a dummy instance (Witness and CommittedInstance) for the current R1CS structure
fn dummy_instance(&self) -> (Witness<C>, CommittedInstance<C>);
/// checks the R1CS relation (un-relaxed) for the given Witness and CommittedInstance.
fn check_instance_relation(
&self,
W: &Witness<C>,
U: &CommittedInstance<C>,
) -> Result<(), Error>;
/// checks the Relaxed R1CS relation (corresponding to the current R1CS) for the given Witness
/// and CommittedInstance.
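    /// A relaxed instance satisfies Az ∘ Bz = u·(Cz) + E, for z = (u, x, W).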
fn check_relaxed_instance_relation(
&self,
W: &Witness<C>,
U: &CommittedInstance<C>,
) -> Result<(), Error>;
}
impl<C: CurveGroup> NovaR1CS<C> for R1CS<C::ScalarField>
where
<C as Group>::ScalarField: Absorb,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
fn dummy_instance(&self) -> (Witness<C>, CommittedInstance<C>) {
let w_len = self.A.n_cols - 1 - self.l;
let w_dummy = Witness::<C>::new(vec![C::ScalarField::zero(); w_len], self.A.n_rows);
let u_dummy = CommittedInstance::<C>::dummy(self.l);
(w_dummy, u_dummy)
}
fn check_instance_relation(
&self,
W: &Witness<C>,
U: &CommittedInstance<C>,
) -> Result<(), Error> {
if U.cmE != C::zero() || U.u != C::ScalarField::one() {
return Err(Error::R1CSUnrelaxedFail);
}
let Z: Vec<C::ScalarField> = [vec![U.u], U.x.to_vec(), W.W.to_vec()].concat();
self.check_relation(&Z)
}
fn check_relaxed_instance_relation(
&self,
W: &Witness<C>,
U: &CommittedInstance<C>,
) -> Result<(), Error> {
let mut rel_r1cs = self.clone().relax();
rel_r1cs.u = U.u;
rel_r1cs.E = W.E.clone();
let Z: Vec<C::ScalarField> = [vec![U.u], U.x.to_vec(), W.W.to_vec()].concat();
rel_r1cs.check_relation(&Z)
}
}


@@ -0,0 +1,599 @@
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_poly::{
univariate::{DensePolynomial, SparsePolynomial},
DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
};
use ark_std::log2;
use ark_std::{cfg_into_iter, Zero};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::marker::PhantomData;
use super::traits::ProtoGalaxyTranscript;
use super::utils::{all_powers, betas_star, exponential_powers};
use super::ProtoGalaxyError;
use super::{CommittedInstance, Witness};
use crate::ccs::r1cs::R1CS;
use crate::transcript::Transcript;
use crate::utils::{bit::bit_decompose, vec::*};
use crate::Error;
#[derive(Clone, Debug)]
/// Implements the protocol described in section 4 of
/// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
pub struct Folding<C: CurveGroup> {
_phantom: PhantomData<C>,
}
impl<C: CurveGroup> Folding<C>
where
<C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: Absorb,
{
#![allow(clippy::type_complexity)]
/// implements the non-interactive Prover from the folding scheme described in section 4
pub fn prove(
transcript: &mut (impl Transcript<C> + ProtoGalaxyTranscript<C>),
r1cs: &R1CS<C::ScalarField>,
// running instance
instance: &CommittedInstance<C>,
w: &Witness<C::ScalarField>,
        // incoming instances
vec_instances: &[CommittedInstance<C>],
vec_w: &[Witness<C::ScalarField>],
) -> Result<
(
CommittedInstance<C>,
Witness<C::ScalarField>,
Vec<C::ScalarField>, // F_X coeffs
Vec<C::ScalarField>, // K_X coeffs
),
Error,
> {
if vec_instances.len() != vec_w.len() {
return Err(Error::NotSameLength(
"vec_instances.len()".to_string(),
vec_instances.len(),
"vec_w.len()".to_string(),
vec_w.len(),
));
}
let d = 2; // for the moment hardcoded to 2 since it only supports R1CS
let k = vec_instances.len();
let t = instance.betas.len();
let n = r1cs.A.n_cols;
if w.w.len() != n {
return Err(Error::NotSameLength(
"w.w.len()".to_string(),
w.w.len(),
"n".to_string(),
n,
));
}
if log2(n) as usize != t {
return Err(Error::NotEqual);
}
if !(k + 1).is_power_of_two() {
return Err(Error::ProtoGalaxy(ProtoGalaxyError::WrongNumInstances(k)));
}
// absorb the committed instances
transcript.absorb_committed_instance(instance)?;
for ci in vec_instances.iter() {
transcript.absorb_committed_instance(ci)?;
}
let delta = transcript.get_challenge();
let deltas = exponential_powers(delta, t);
let f_w = eval_f(r1cs, &w.w)?;
// F(X)
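        // F(X) = Σ_{i∈[n]} f_i(w) · Π_{j : bit_j(i)=1} (β_j + X·δ_j), computed with the
        // binary-tree method of Claim 4.4 (calc_f_from_btree below)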
let F_X: SparsePolynomial<C::ScalarField> =
calc_f_from_btree(&f_w, &instance.betas, &deltas).expect("Error calculating F[x]");
let F_X_dense = DensePolynomial::from(F_X.clone());
transcript.absorb_vec(&F_X_dense.coeffs);
let alpha = transcript.get_challenge();
// eval F(alpha)
let F_alpha = F_X.evaluate(&alpha);
// betas*
let betas_star = betas_star(&instance.betas, &deltas, alpha);
// sanity check: check that the new randomized instance (the original instance but with
// 'refreshed' randomness) satisfies the relation.
#[cfg(test)]
tests::check_instance(
r1cs,
&CommittedInstance {
phi: instance.phi,
betas: betas_star.clone(),
e: F_alpha,
},
w,
)?;
let ws: Vec<Vec<C::ScalarField>> = std::iter::once(w.w.clone())
.chain(
vec_w
.iter()
.map(|wj| {
if wj.w.len() != n {
return Err(Error::NotSameLength(
"wj.w.len()".to_string(),
wj.w.len(),
"n".to_string(),
n,
));
}
Ok(wj.w.clone())
})
.collect::<Result<Vec<Vec<C::ScalarField>>, Error>>()?,
)
.collect::<Vec<Vec<C::ScalarField>>>();
let H =
GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).ok_or(Error::NewDomainFail)?;
let G_domain = GeneralEvaluationDomain::<C::ScalarField>::new((d * k) + 1)
.ok_or(Error::NewDomainFail)?;
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
// K(X) computation in a naive way, next iterations will compute K(X) as described in Claim
// 4.5 of the paper.
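        // G(X) = Σ_{i∈[n]} pow_i(β*) · f_i( Σ_{j∈[k+1]} L_j(X)·w_j ), with w_0 the running
        // witness; here G is evaluated pointwise over G_domain and interpolated afterwards.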
let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); G_domain.size()];
for (hi, h) in G_domain.elements().enumerate() {
// each iteration evaluates G(h)
            // inner = Σ_{i∈[k+1]} L_i(X)·w_i  (with w_0 the running witness)
let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
for (i, w) in ws.iter().enumerate() {
// Li_w_h = (Li(X)*wj)(h) = Li(h) * wj
let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
for (j, wj) in w.iter().enumerate() {
Liw_h[j] = (&L_X[i] * *wj).evaluate(&h);
}
for j in 0..inner.len() {
inner[j] += Liw_h[j];
}
}
let f_ev = eval_f(r1cs, &inner)?;
let mut Gsum = C::ScalarField::zero();
for (i, f_ev_i) in f_ev.iter().enumerate() {
let pow_i_betas = pow_i(i, &betas_star);
let curr = pow_i_betas * f_ev_i;
Gsum += curr;
}
G_evals[hi] = Gsum;
}
let G_X: DensePolynomial<C::ScalarField> =
Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals, G_domain).interpolate();
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
// K(X) = (G(X) - F(alpha)*L_0(X)) / Z(X)
        // Notice that L_0(X)·F(α) will be 0 in the base case (the first folding iteration, where
        // the running instance still satisfies the relation exactly, so F ≡ 0).
let L0_e = &L_X[0] * F_alpha;
let G_L0e = &G_X - &L0_e;
// Pending optimization: move division by Z_X to the prev loop
let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H).ok_or(Error::ProtoGalaxy(
ProtoGalaxyError::CouldNotDivideByVanishing,
))?;
if !remainder.is_zero() {
return Err(Error::ProtoGalaxy(ProtoGalaxyError::RemainderNotZero));
}
transcript.absorb_vec(&K_X.coeffs);
let gamma = transcript.get_challenge();
let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
for i in 0..k {
phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
}
let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
let mut r_w_star: C::ScalarField = w.r_w * L_X[0].evaluate(&gamma);
for i in 0..k {
let L_X_at_i1 = L_X[i + 1].evaluate(&gamma);
w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_at_i1))?;
r_w_star += vec_w[i].r_w * L_X_at_i1;
}
Ok((
CommittedInstance {
betas: betas_star,
phi: phi_star,
e: e_star,
},
Witness {
w: w_star,
r_w: r_w_star,
},
F_X_dense.coeffs,
K_X.coeffs,
))
}
/// implements the non-interactive Verifier from the folding scheme described in section 4
pub fn verify(
transcript: &mut (impl Transcript<C> + ProtoGalaxyTranscript<C>),
r1cs: &R1CS<C::ScalarField>,
// running instance
instance: &CommittedInstance<C>,
        // incoming instances
vec_instances: &[CommittedInstance<C>],
// polys from P
F_coeffs: Vec<C::ScalarField>,
K_coeffs: Vec<C::ScalarField>,
) -> Result<CommittedInstance<C>, Error> {
let t = instance.betas.len();
let n = r1cs.A.n_cols;
// absorb the committed instances
transcript.absorb_committed_instance(instance)?;
for ci in vec_instances.iter() {
transcript.absorb_committed_instance(ci)?;
}
let delta = transcript.get_challenge();
let deltas = exponential_powers(delta, t);
transcript.absorb_vec(&F_coeffs);
let alpha = transcript.get_challenge();
let alphas = all_powers(alpha, n);
        // F(α) = e + Σ_{i≥1} F_i · αⁱ
let mut F_alpha = instance.e;
for (i, F_i) in F_coeffs.iter().skip(1).enumerate() {
F_alpha += *F_i * alphas[i + 1];
}
let betas_star = betas_star(&instance.betas, &deltas, alpha);
let k = vec_instances.len();
let H =
GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).ok_or(Error::NewDomainFail)?;
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
let K_X: DensePolynomial<C::ScalarField> =
DensePolynomial::<C::ScalarField>::from_coefficients_vec(K_coeffs);
transcript.absorb_vec(&K_X.coeffs);
let gamma = transcript.get_challenge();
let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
for i in 0..k {
phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
}
// return the folded instance
Ok(CommittedInstance {
betas: betas_star,
phi: phi_star,
e: e_star,
})
}
}
// naive impl of pow_i for betas, assuming that betas=(b, b^2, b^4, ..., b^{2^{t-1}})
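// pow_i(i, betas) = Π_j ((1 - b_j) + b_j·β_j), where (b_0, ..., b_{t-1}) is the little-endian bit
// decomposition of i; for betas = (b, b², b⁴, ...) this equals b^i (see test_pow_i)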
fn pow_i<F: PrimeField>(i: usize, betas: &[F]) -> F {
    // WIP: check if it makes more sense to do it with ifs instead of arithmetic
let n = 2_u64.pow(betas.len() as u32);
let b = bit_decompose(i as u64, n as usize);
let mut r: F = F::one();
for (j, beta_j) in betas.iter().enumerate() {
let mut b_j = F::zero();
if b[j] {
b_j = F::one();
}
r *= (F::one() - b_j) + b_j * beta_j;
}
r
}
/// calculates F(X) using the optimized binary-tree technique described in Claim 4.4 of
/// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
fn calc_f_from_btree<F: PrimeField>(
fw: &[F],
betas: &[F],
deltas: &[F],
) -> Result<SparsePolynomial<F>, Error> {
let fw_len = fw.len();
let betas_len = betas.len();
let deltas_len = deltas.len();
// ensure our binary tree is full
if !fw_len.is_power_of_two() {
return Err(Error::ProtoGalaxy(ProtoGalaxyError::BTreeNotFull(fw_len)));
}
if betas_len != deltas_len {
return Err(Error::ProtoGalaxy(ProtoGalaxyError::WrongLenBetas(
betas_len, deltas_len,
)));
}
let mut layers: Vec<Vec<SparsePolynomial<F>>> = Vec::new();
let leaves: Vec<SparsePolynomial<F>> = fw
.iter()
.copied()
.map(|e| SparsePolynomial::<F>::from_coefficients_slice(&[(0, e)]))
.collect();
layers.push(leaves.to_vec());
    let mut current_nodes = leaves.clone();
    while current_nodes.len() > 1 {
        let index = layers.len();
        layers.push(vec![]);
        for (i, ni) in current_nodes.iter().enumerate().step_by(2) {
            let left = ni.clone();
            let right = SparsePolynomial::<F>::from_coefficients_vec(vec![
                (0, betas[layers.len() - 2]),
                (1, deltas[layers.len() - 2]),
            ])
            .mul(&current_nodes[i + 1]);
            layers[index].push(left + right);
        }
        current_nodes = layers[index].clone();
    }
let root_index = layers.len() - 1;
Ok(layers[root_index][0].clone())
}
// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
for i in 0..domain_n.size() {
let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
.map(|k| if k == i { F::one() } else { F::zero() })
.collect();
lagrange_polynomials.push(Evaluations::from_vec_and_domain(evals, domain_n).interpolate());
}
lagrange_polynomials
}
// f(w) in R1CS context. For the moment we use R1CS, in the future we will abstract this with a
// trait
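// i.e. f(w) = (A·w) ∘ (B·w) - (C·w); it is the zero vector iff w satisfies the R1CS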
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Result<Vec<F>, Error> {
let Az = mat_vec_mul_sparse(&r1cs.A, w)?;
let Bz = mat_vec_mul_sparse(&r1cs.B, w)?;
let Cz = mat_vec_mul_sparse(&r1cs.C, w)?;
let AzBz = hadamard(&Az, &Bz)?;
vec_sub(&AzBz, &Cz)
}
#[cfg(test)]
mod tests {
use super::*;
use ark_pallas::{Fr, Projective};
use ark_std::UniformRand;
use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z};
use crate::commitment::{pedersen::Pedersen, CommitmentProver};
use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};
pub(crate) fn check_instance<C: CurveGroup>(
r1cs: &R1CS<C::ScalarField>,
instance: &CommittedInstance<C>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
if instance.betas.len() != log2(w.w.len()) as usize {
return Err(Error::NotSameLength(
"instance.betas.len()".to_string(),
instance.betas.len(),
"log2(w.w.len())".to_string(),
log2(w.w.len()) as usize,
));
}
let f_w = eval_f(r1cs, &w.w)?; // f(w)
let mut r = C::ScalarField::zero();
for (i, f_w_i) in f_w.iter().enumerate() {
r += pow_i(i, &instance.betas) * f_w_i;
}
if instance.e == r {
return Ok(());
}
Err(Error::NotSatisfied)
}
#[test]
fn test_pow_i() {
let mut rng = ark_std::test_rng();
let t = 4;
let n = 16;
let beta = Fr::rand(&mut rng);
let betas = exponential_powers(beta, t);
let not_betas = all_powers(beta, n);
#[allow(clippy::needless_range_loop)]
for i in 0..n {
assert_eq!(pow_i(i, &betas), not_betas[i]);
}
}
#[test]
fn test_eval_f() {
let r1cs = get_test_r1cs::<Fr>();
let mut z = get_test_z::<Fr>(3);
let f_w = eval_f(&r1cs, &z).unwrap();
assert!(is_zero_vec(&f_w));
z[1] = Fr::from(111);
let f_w = eval_f(&r1cs, &z).unwrap();
assert!(!is_zero_vec(&f_w));
}
    // k represents the number of instances to be folded, apart from the running instance
#[allow(clippy::type_complexity)]
fn prepare_inputs(
k: usize,
) -> (
Witness<Fr>,
CommittedInstance<Projective>,
Vec<Witness<Fr>>,
Vec<CommittedInstance<Projective>>,
) {
let mut rng = ark_std::test_rng();
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, 100); // 100 is wip, will get it from actual vec
let z = get_test_z::<Fr>(3);
let mut zs: Vec<Vec<Fr>> = Vec::new();
for i in 0..k {
let z_i = get_test_z::<Fr>(i + 4);
zs.push(z_i);
}
let n = z.len();
let t = log2(n) as usize;
let beta = Fr::rand(&mut rng);
let betas = exponential_powers(beta, t);
let witness = Witness::<Fr> {
w: z.clone(),
r_w: Fr::rand(&mut rng),
};
let phi =
Pedersen::<Projective>::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap();
let instance = CommittedInstance::<Projective> {
phi,
betas: betas.clone(),
e: Fr::zero(),
};
// same for the other instances
let mut witnesses: Vec<Witness<Fr>> = Vec::new();
let mut instances: Vec<CommittedInstance<Projective>> = Vec::new();
#[allow(clippy::needless_range_loop)]
for i in 0..k {
let witness_i = Witness::<Fr> {
w: zs[i].clone(),
r_w: Fr::rand(&mut rng),
};
let phi_i =
Pedersen::<Projective>::commit(&pedersen_params, &witness_i.w, &witness_i.r_w)
.unwrap();
let instance_i = CommittedInstance::<Projective> {
phi: phi_i,
betas: betas.clone(),
e: Fr::zero(),
};
witnesses.push(witness_i);
instances.push(instance_i);
}
(witness, instance, witnesses, instances)
}
#[test]
fn test_fold_native_case() {
let k = 7;
let (witness, instance, witnesses, instances) = prepare_inputs(k);
let r1cs = get_test_r1cs::<Fr>();
// init Prover & Verifier's transcript
let poseidon_config = poseidon_test_config::<Fr>();
let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
let (folded_instance, folded_witness, F_coeffs, K_coeffs) = Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&instance,
&witness,
&instances,
&witnesses,
)
.unwrap();
        // verifier
let folded_instance_v = Folding::<Projective>::verify(
&mut transcript_v,
&r1cs,
&instance,
&instances,
F_coeffs,
K_coeffs,
)
.unwrap();
// check that prover & verifier folded instances are the same values
assert_eq!(folded_instance.phi, folded_instance_v.phi);
assert_eq!(folded_instance.betas, folded_instance_v.betas);
assert_eq!(folded_instance.e, folded_instance_v.e);
assert!(!folded_instance.e.is_zero());
// check that the folded instance satisfies the relation
check_instance(&r1cs, &folded_instance, &folded_witness).unwrap();
}
#[test]
fn test_fold_various_iterations() {
let r1cs = get_test_r1cs::<Fr>();
// init Prover & Verifier's transcript
let poseidon_config = poseidon_test_config::<Fr>();
let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
let (mut running_witness, mut running_instance, _, _) = prepare_inputs(0);
        // fold k instances in each of the num_iters iterations
let k = 7;
let num_iters = 10;
for _ in 0..num_iters {
            // generate the instances to be folded
let (_, _, witnesses, instances) = prepare_inputs(k);
let (folded_instance, folded_witness, F_coeffs, K_coeffs) =
Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&running_instance,
&running_witness,
&instances,
&witnesses,
)
.unwrap();
            // verifier
let folded_instance_v = Folding::<Projective>::verify(
&mut transcript_v,
&r1cs,
&running_instance,
&instances,
F_coeffs,
K_coeffs,
)
.unwrap();
// check that prover & verifier folded instances are the same values
assert_eq!(folded_instance.phi, folded_instance_v.phi);
assert_eq!(folded_instance.betas, folded_instance_v.betas);
assert_eq!(folded_instance.e, folded_instance_v.e);
assert!(!folded_instance.e.is_zero());
// check that the folded instance satisfies the relation
check_instance(&r1cs, &folded_instance, &folded_witness).unwrap();
running_witness = folded_witness;
running_instance = folded_instance;
}
}
}


@@ -0,0 +1,35 @@
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use thiserror::Error;
pub mod folding;
pub mod traits;
pub(crate) mod utils;
#[derive(Clone, Debug)]
pub struct CommittedInstance<C: CurveGroup> {
phi: C,
betas: Vec<C::ScalarField>,
e: C::ScalarField,
}
#[derive(Clone, Debug)]
pub struct Witness<F: PrimeField> {
w: Vec<F>,
r_w: F,
}
#[derive(Debug, Error, PartialEq)]
pub enum ProtoGalaxyError {
#[error("The remainder from G(X)-F(α)*L_0(X)) / Z(X) should be zero")]
RemainderNotZero,
#[error("Could not divide by vanishing polynomial")]
CouldNotDivideByVanishing,
#[error("The number of incoming instances + 1 should be a power of two, current number of instances: {0}")]
WrongNumInstances(usize),
#[error("The number of incoming items should be a power of two, current number of coefficients: {0}")]
BTreeNotFull(usize),
#[error("The lengths of β and δ do not equal: |β| = {0}, |δ|={0}")]
WrongLenBetas(usize, usize),
}
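As a quick illustration of `WrongNumInstances` above: ProtoGalaxy folds k incoming instances together with the running one, so k + 1 must be a power of two (the tests use k = 7, since 7 + 1 = 2^3). A hypothetical helper (not part of the file) sketching the check:
fn check_num_instances(k: usize) -> Result<(), ProtoGalaxyError> {
    // k incoming instances plus the running instance must total a power of two
    if !(k + 1).is_power_of_two() {
        return Err(ProtoGalaxyError::WrongNumInstances(k));
    }
    Ok(())
}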

View File

@@ -0,0 +1,23 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use super::CommittedInstance;
use crate::transcript::{poseidon::PoseidonTranscript, Transcript};
use crate::Error;
/// ProtoGalaxyTranscript extends [`Transcript`] with the method to absorb ProtoGalaxy's
/// CommittedInstance.
pub trait ProtoGalaxyTranscript<C: CurveGroup>: Transcript<C> {
fn absorb_committed_instance(&mut self, ci: &CommittedInstance<C>) -> Result<(), Error> {
self.absorb_point(&ci.phi)?;
self.absorb_vec(&ci.betas);
self.absorb(&ci.e);
Ok(())
}
}
// Implements ProtoGalaxyTranscript for PoseidonTranscript
impl<C: CurveGroup> ProtoGalaxyTranscript<C> for PoseidonTranscript<C> where
<C as Group>::ScalarField: Absorb
{
}

View File

@@ -0,0 +1,32 @@
use ark_ff::PrimeField;
// returns (b, b^2, b^4, ..., b^{2^{t-1}})
pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
let mut r = vec![F::zero(); t];
r[0] = b;
for i in 1..t {
r[i] = r[i - 1].square();
}
r
}
pub fn all_powers<F: PrimeField>(a: F, n: usize) -> Vec<F> {
let mut r = vec![F::zero(); n];
for (i, r_i) in r.iter_mut().enumerate() {
*r_i = a.pow([i as u64]);
}
r
}
// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
pub fn betas_star<F: PrimeField>(betas: &[F], deltas: &[F], alpha: F) -> Vec<F> {
    betas
        .iter()
        .zip(deltas.iter())
        .map(|(beta_i, delta_i)| *beta_i + alpha * delta_i)
        .collect()
}
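A small sanity sketch (illustrative; assumes `ark_pallas::Fr` as the field and the helpers above in scope) of what these functions return:
#[cfg(test)]
mod sanity {
    use super::*;
    use ark_pallas::Fr;
    #[test]
    fn powers_sanity() {
        let b = Fr::from(2u64);
        // exponential_powers squares repeatedly: [b, b^2, b^4]
        assert_eq!(
            exponential_powers(b, 3),
            vec![Fr::from(2u64), Fr::from(4u64), Fr::from(16u64)]
        );
        // all_powers starts at b^0: [1, b, b^2]
        assert_eq!(
            all_powers(b, 3),
            vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64)]
        );
    }
}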

View File

@@ -0,0 +1,226 @@
use std::{error::Error, fs::File, io::BufReader, marker::PhantomData, path::PathBuf};
use color_eyre::Result;
use num_bigint::BigInt;
use ark_circom::{circom::r1cs_reader, WitnessCalculator};
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;
use crate::ccs::r1cs::R1CS;
use crate::utils::vec::SparseMatrix;
// Define the sparse matrices over a PrimeField.
pub type Constraints<F> = (ConstraintVec<F>, ConstraintVec<F>, ConstraintVec<F>);
pub type ConstraintVec<F> = Vec<(usize, F)>;
type ExtractedConstraints<F> = (Vec<Constraints<F>>, usize, usize);
pub type ExtractedConstraintsResult<F> = Result<ExtractedConstraints<F>, Box<dyn Error>>;
pub type R1CSandZ<F> = (R1CS<F>, Vec<F>);
// A struct that wraps Circom functionalities, allowing for extraction of R1CS and witnesses
// based on file paths to Circom's .r1cs and .wasm.
pub struct CircomWrapper<E: Pairing> {
r1cs_filepath: PathBuf,
wasm_filepath: PathBuf,
_marker: PhantomData<E>,
}
impl<E: Pairing> CircomWrapper<E> {
// Creates a new instance of the CircomWrapper with the file paths.
pub fn new(r1cs_filepath: PathBuf, wasm_filepath: PathBuf) -> Self {
CircomWrapper {
r1cs_filepath,
wasm_filepath,
_marker: PhantomData,
}
}
// Aggregates multiple steps to obtain the R1CS and Z (as defined in folding-schemes) from Circom.
pub fn extract_r1cs_and_z(
&self,
inputs: &[(String, Vec<BigInt>)],
) -> Result<R1CSandZ<E::ScalarField>, Box<dyn Error>> {
let (constraints, pub_io_len, num_variables) = self.extract_constraints_from_r1cs()?;
let witness = self.calculate_witness(inputs)?;
self.circom_to_folding_schemes_r1cs_and_z(constraints, &witness, pub_io_len, num_variables)
}
// Extracts constraints from the r1cs file.
pub fn extract_constraints_from_r1cs(&self) -> ExtractedConstraintsResult<E::ScalarField>
where
E: Pairing,
{
// Opens the .r1cs file and creates a reader.
let file = File::open(&self.r1cs_filepath)?;
let reader = BufReader::new(file);
// Reads the R1CS file and extracts the constraints directly.
let r1cs_file = r1cs_reader::R1CSFile::<E>::new(reader)?;
let pub_io_len = (r1cs_file.header.n_pub_in + r1cs_file.header.n_pub_out) as usize;
let r1cs = r1cs_reader::R1CS::<E>::from(r1cs_file);
let num_variables = r1cs.num_variables;
let constraints: Vec<Constraints<E::ScalarField>> = r1cs.constraints;
Ok((constraints, pub_io_len, num_variables))
}
// Converts a set of constraints from ark-circom into R1CS format of folding-schemes.
pub fn convert_to_folding_schemes_r1cs<F>(
&self,
constraints: Vec<Constraints<F>>,
pub_io_len: usize,
num_variables: usize,
) -> R1CS<F>
where
F: PrimeField,
{
let mut a_matrix: Vec<Vec<(F, usize)>> = Vec::new();
let mut b_matrix: Vec<Vec<(F, usize)>> = Vec::new();
let mut c_matrix: Vec<Vec<(F, usize)>> = Vec::new();
let n_rows = constraints.len();
for (ai, bi, ci) in constraints {
a_matrix.push(
ai.into_iter()
.map(|(index, scalar)| (scalar, index))
.collect(),
);
b_matrix.push(
bi.into_iter()
.map(|(index, scalar)| (scalar, index))
.collect(),
);
c_matrix.push(
ci.into_iter()
.map(|(index, scalar)| (scalar, index))
.collect(),
);
}
let l = pub_io_len;
let n_cols = num_variables;
let A = SparseMatrix {
n_rows,
n_cols,
coeffs: a_matrix,
};
let B = SparseMatrix {
n_rows,
n_cols,
coeffs: b_matrix,
};
let C = SparseMatrix {
n_rows,
n_cols,
coeffs: c_matrix,
};
R1CS::<F> { l, A, B, C }
}
// Calculates the witness given the Wasm filepath and inputs.
pub fn calculate_witness(&self, inputs: &[(String, Vec<BigInt>)]) -> Result<Vec<BigInt>> {
let mut calculator = WitnessCalculator::new(&self.wasm_filepath)?;
calculator.calculate_witness(inputs.iter().cloned(), true)
}
// Converts a num_bigint input to `PrimeField`'s BigInt.
pub fn num_bigint_to_ark_bigint<F: PrimeField>(
&self,
value: &BigInt,
) -> Result<F::BigInt, Box<dyn Error>> {
let big_uint = value
.to_biguint()
.ok_or_else(|| "BigInt is negative".to_string())?;
F::BigInt::try_from(big_uint).map_err(|_| "BigInt conversion failed".to_string().into())
}
// Converts R1CS constraints and witness from ark-circom format
// into folding-schemes R1CS and z format.
pub fn circom_to_folding_schemes_r1cs_and_z<F>(
&self,
constraints: Vec<Constraints<F>>,
witness: &[BigInt],
pub_io_len: usize,
num_variables: usize,
) -> Result<(R1CS<F>, Vec<F>), Box<dyn Error>>
where
F: PrimeField,
{
let folding_schemes_r1cs =
self.convert_to_folding_schemes_r1cs(constraints, pub_io_len, num_variables);
let z: Result<Vec<F>, _> = witness
.iter()
.map(|big_int| {
let ark_big_int = self.num_bigint_to_ark_bigint::<F>(big_int)?;
F::from_bigint(ark_big_int).ok_or_else(|| {
"Failed to convert bigint to field element"
.to_string()
.into()
})
})
.collect();
z.map(|z| (folding_schemes_r1cs, z))
}
}
#[cfg(test)]
mod tests {
use crate::frontend::circom::CircomWrapper;
use ark_bn254::Bn254;
use num_bigint::BigInt;
use std::env;
/*
test_circuit represents 35 = x^3 + x + 5 in test_folder/test_circuit.circom.
In the success case, x is assigned the value 3, which satisfies the R1CS (3^3 + 3 + 5 = 35).
In the failure case, x is assigned the value 7, which does not satisfy the R1CS (7^3 + 7 + 5 = 355 ≠ 35).
*/
/*
To generate .r1cs and .wasm files, run the below command in the terminal.
bash ./src/frontend/circom/test_folder/compile.sh
*/
fn test_circom_conversion_logic(expect_success: bool, inputs: Vec<(String, Vec<BigInt>)>) {
let current_dir = env::current_dir().unwrap();
let base_path = current_dir.join("src/frontend/circom/test_folder");
let r1cs_filepath = base_path.join("test_circuit.r1cs");
let wasm_filepath = base_path.join("test_circuit_js/test_circuit.wasm");
assert!(r1cs_filepath.exists());
assert!(wasm_filepath.exists());
let circom_wrapper = CircomWrapper::<Bn254>::new(r1cs_filepath, wasm_filepath);
let (r1cs, z) = circom_wrapper
.extract_r1cs_and_z(&inputs)
.expect("Error processing input");
// Checks the relationship of R1CS.
let check_result = std::panic::catch_unwind(|| {
r1cs.check_relation(&z).unwrap();
});
match (expect_success, check_result) {
(true, Ok(_)) => {}
(false, Err(_)) => {}
(true, Err(_)) => panic!("Expected success but got a failure."),
(false, Ok(_)) => panic!("Expected a failure but got success."),
}
}
#[test]
fn test_circom_conversion() {
// expect it to pass for correct inputs.
test_circom_conversion_logic(true, vec![("step_in".to_string(), vec![BigInt::from(3)])]);
// expect it to fail for incorrect inputs.
test_circom_conversion_logic(false, vec![("step_in".to_string(), vec![BigInt::from(7)])]);
}
}

View File

@@ -0,0 +1,2 @@
#!/bin/bash
circom ./folding-schemes/src/frontend/circom/test_folder/test_circuit.circom --r1cs --wasm --prime bn128 --output ./folding-schemes/src/frontend/circom/test_folder/

View File

@@ -0,0 +1,13 @@
pragma circom 2.0.3;
template Example () {
signal input step_in;
signal output step_out;
signal temp;
temp <== step_in * step_in;
step_out <== temp * step_in + step_in + 5;
step_out === 35;
}
component main = Example();

View File

@@ -0,0 +1,171 @@
pub mod circom;
use crate::Error;
use ark_ff::PrimeField;
use ark_r1cs_std::fields::fp::FpVar;
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::fmt::Debug;
/// FCircuit defines the trait for the circuit of the F function, which is the one being folded (i.e.
/// inside the augmented F' function).
/// The parameter z_i denotes the current state, and z_{i+1} denotes the next state after applying
/// the step.
pub trait FCircuit<F: PrimeField>: Clone + Copy + Debug {
type Params: Debug;
/// returns a new FCircuit instance
fn new(params: Self::Params) -> Self;
/// computes the next state z_{i+1} from the current state z_i
fn step_native(
// this method uses self, so that each FCircuit implementation (and different frontends)
// can hold a state if needed to store data to compute the next state.
self,
z_i: Vec<F>,
) -> Result<Vec<F>, Error>;
/// generates the constraints for the step of F for the given z_i
fn generate_step_constraints(
// this method uses self, so that each FCircuit implementation (and different frontends)
// can hold a state if needed to store data to generate the constraints.
self,
cs: ConstraintSystemRef<F>,
z_i: Vec<FpVar<F>>,
) -> Result<Vec<FpVar<F>>, SynthesisError>;
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::Fr;
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget};
use ark_relations::r1cs::{
ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, SynthesisError,
};
use core::marker::PhantomData;
/// CubicFCircuit is a struct that implements the FCircuit trait, for the R1CS example circuit
/// from https://www.vitalik.ca/general/2016/12/10/qap.html, which checks `x^3 + x + 5 = y`. It
/// has 2 public inputs which are used as the state. `z_i` is used as `x`, and `z_{i+1}` is
/// used as `y`, and at the next step, `z_{i+1}` will be assigned to `z_i`, and a new `z_{i+1}`
/// will be computed.
#[derive(Clone, Copy, Debug)]
pub struct CubicFCircuit<F: PrimeField> {
_f: PhantomData<F>,
}
impl<F: PrimeField> FCircuit<F> for CubicFCircuit<F> {
type Params = ();
fn new(_params: Self::Params) -> Self {
Self { _f: PhantomData }
}
fn step_native(self, z_i: Vec<F>) -> Result<Vec<F>, Error> {
Ok(vec![z_i[0] * z_i[0] * z_i[0] + z_i[0] + F::from(5_u32)])
}
fn generate_step_constraints(
self,
cs: ConstraintSystemRef<F>,
z_i: Vec<FpVar<F>>,
) -> Result<Vec<FpVar<F>>, SynthesisError> {
let five = FpVar::<F>::new_constant(cs.clone(), F::from(5u32))?;
let z_i = z_i[0].clone();
Ok(vec![&z_i * &z_i * &z_i + &z_i + &five])
}
}
/// CustomFCircuit is a circuit that has the number of constraints specified in the
/// `n_constraints` parameter. Note that the generated circuit will have very sparse matrices.
#[derive(Clone, Copy, Debug)]
pub struct CustomFCircuit<F: PrimeField> {
_f: PhantomData<F>,
pub n_constraints: usize,
}
impl<F: PrimeField> FCircuit<F> for CustomFCircuit<F> {
type Params = usize;
fn new(params: Self::Params) -> Self {
Self {
_f: PhantomData,
n_constraints: params,
}
}
fn step_native(self, z_i: Vec<F>) -> Result<Vec<F>, Error> {
let mut z_i1 = F::one();
for _ in 0..self.n_constraints - 1 {
z_i1 *= z_i[0];
}
Ok(vec![z_i1])
}
fn generate_step_constraints(
self,
cs: ConstraintSystemRef<F>,
z_i: Vec<FpVar<F>>,
) -> Result<Vec<FpVar<F>>, SynthesisError> {
let mut z_i1 = FpVar::<F>::new_witness(cs.clone(), || Ok(F::one()))?;
for _ in 0..self.n_constraints - 1 {
z_i1 *= z_i[0].clone();
}
Ok(vec![z_i1])
}
}
/// WrapperCircuit is a circuit that wraps any circuit that implements the FCircuit trait. This
/// is used to test the `FCircuit.generate_step_constraints` method. This is a wrapping similar
/// to the one done in the `AugmentedFCircuit`, but without adding all the extra constraints
/// of the AugmentedF circuit logic, in order to run lighter tests when we're interested only in
/// the wrapping of the circuits and not in the AugmentedF logic.
pub struct WrapperCircuit<F: PrimeField, FC: FCircuit<F>> {
pub FC: FC, // F circuit
pub z_i: Option<Vec<F>>,
pub z_i1: Option<Vec<F>>,
}
impl<F, FC> ConstraintSynthesizer<F> for WrapperCircuit<F, FC>
where
F: PrimeField,
FC: FCircuit<F>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
let z_i = Vec::<FpVar<F>>::new_witness(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![F::zero()]))
})?;
let z_i1 = Vec::<FpVar<F>>::new_input(cs.clone(), || {
Ok(self.z_i1.unwrap_or(vec![F::zero()]))
})?;
let computed_z_i1 = self.FC.generate_step_constraints(cs.clone(), z_i.clone())?;
computed_z_i1.enforce_equal(&z_i1)?;
Ok(())
}
}
#[test]
fn test_testfcircuit() {
let cs = ConstraintSystem::<Fr>::new_ref();
let F_circuit = CubicFCircuit::<Fr>::new(());
let wrapper_circuit = WrapperCircuit::<Fr, CubicFCircuit<Fr>> {
FC: F_circuit,
z_i: Some(vec![Fr::from(3_u32)]),
z_i1: Some(vec![Fr::from(35_u32)]),
};
wrapper_circuit.generate_constraints(cs.clone()).unwrap();
assert_eq!(cs.num_constraints(), 3);
}
#[test]
fn test_customtestfcircuit() {
let cs = ConstraintSystem::<Fr>::new_ref();
let n_constraints = 1000;
let custom_circuit = CustomFCircuit::<Fr>::new(n_constraints);
let z_i = vec![Fr::from(5_u32)];
let wrapper_circuit = WrapperCircuit::<Fr, CustomFCircuit<Fr>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(z_i).unwrap()),
};
wrapper_circuit.generate_constraints(cs.clone()).unwrap();
assert_eq!(cs.num_constraints(), n_constraints);
}
}

folding-schemes/src/lib.rs
View File

@@ -0,0 +1,156 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_std::rand::CryptoRng;
use ark_std::{fmt::Debug, rand::RngCore};
use thiserror::Error;
use crate::frontend::FCircuit;
pub mod ccs;
pub mod commitment;
pub mod constants;
pub mod folding;
pub mod frontend;
pub mod transcript;
pub mod utils;
#[derive(Debug, Error)]
pub enum Error {
#[error("ark_relations::r1cs::SynthesisError")]
SynthesisError(#[from] ark_relations::r1cs::SynthesisError),
#[error("ark_serialize::SerializationError")]
SerializationError(#[from] ark_serialize::SerializationError),
#[error("ark_poly_commit::Error")]
PolyCommitError(#[from] ark_poly_commit::Error),
#[error("crate::utils::espresso::virtual_polynomial::ArithErrors")]
ArithError(#[from] utils::espresso::virtual_polynomial::ArithErrors),
#[error("{0}")]
Other(String),
#[error("Relation not satisfied")]
NotSatisfied,
#[error("Not equal")]
NotEqual,
#[error("Vectors should have the same length ({0}: {1}, {2}: {3})")]
NotSameLength(String, usize, String, usize),
#[error("Vector's length ({0}) is not the expected ({1})")]
NotExpectedLength(usize, usize),
#[error("Can not be empty")]
Empty,
#[error("Pedersen parameters length is not suficient (generators.len={0} < vector.len={1} unsatisfied)")]
PedersenParamsLen(usize, usize),
#[error("Commitment verification failed")]
CommitmentVerificationFail,
#[error("IVC verification failed")]
IVCVerificationFail,
#[error("R1CS instance is expected to not be relaxed")]
R1CSUnrelaxedFail,
#[error("Could not find the inner ConstraintSystem")]
NoInnerConstraintSystem,
#[error("Sum-check prove failed: {0}")]
SumCheckProveError(String),
#[error("Sum-check verify failed: {0}")]
SumCheckVerifyError(String),
#[error("Value out of bounds")]
OutOfBounds,
#[error("Could not construct the Evaluation Domain")]
NewDomainFail,
#[error("Feature '{0}' not supported yet")]
NotSupportedYet(String),
#[error(transparent)]
ProtoGalaxy(folding::protogalaxy::ProtoGalaxyError),
}
/// FoldingScheme defines the trait that is implemented by the various folding schemes. It is
/// defined over a cycle of curves (C1, C2), where:
/// - C1 is the main curve, whose ScalarField we use as our F for all the field operations
/// - C2 is the auxiliary curve, which we use for the commitments, whose BaseField (for point
/// coordinates) is C1::ScalarField.
/// In other words, C1.Fq == C2.Fr, and C1.Fr == C2.Fq.
pub trait FoldingScheme<C1: CurveGroup, C2: CurveGroup, FC>: Clone + Debug
where
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
C2::BaseField: PrimeField,
FC: FCircuit<C1::ScalarField>,
{
type PreprocessorParam: Debug;
type ProverParam: Debug;
type VerifierParam: Debug;
type CommittedInstanceWithWitness: Debug;
type CFCommittedInstanceWithWitness: Debug; // CycleFold CommittedInstance & Witness
fn preprocess(
prep_param: &Self::PreprocessorParam,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error>;
fn init(
pp: &Self::ProverParam,
step_circuit: FC,
z_0: Vec<C1::ScalarField>, // initial state
) -> Result<Self, Error>;
fn prove_step(&mut self) -> Result<(), Error>;
// returns the state at the current step
fn state(&self) -> Vec<C1::ScalarField>;
// returns the instances at the current step
fn instances(
&self,
) -> (
Self::CommittedInstanceWithWitness,
Self::CommittedInstanceWithWitness,
Self::CFCommittedInstanceWithWitness,
);
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
// number of steps between the initial state and the last state
num_steps: C1::ScalarField,
running_instance: Self::CommittedInstanceWithWitness,
incoming_instance: Self::CommittedInstanceWithWitness,
cyclefold_instance: Self::CFCommittedInstanceWithWitness,
) -> Result<(), Error>;
}
pub trait Decider<
C1: CurveGroup,
C2: CurveGroup,
FC: FCircuit<C1::ScalarField>,
FS: FoldingScheme<C1, C2, FC>,
> where
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
C2::BaseField: PrimeField,
{
type ProverParam: Clone;
type Proof: Clone;
type VerifierParam;
type PublicInput: Debug;
type CommittedInstanceWithWitness: Debug;
type CommittedInstance: Clone + Debug;
fn prove(
pp: &Self::ProverParam,
rng: impl RngCore + CryptoRng,
folding_scheme: FS,
) -> Result<Self::Proof, Error>;
fn verify(
vp: &Self::VerifierParam,
i: C1::ScalarField,
z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>,
running_instance: &Self::CommittedInstance,
proof: Self::Proof,
// returns `Result<bool, Error>` to differentiate between an error occurring while
// performing the verification steps, and the verification logic of the scheme not passing.
) -> Result<bool, Error>;
}
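For orientation, a hedged pseudocode sketch of how the two traits above are meant to be driven (names like `prep_param`, `f_circuit` and `n` are illustrative, assuming some concrete instantiation `FS: FoldingScheme<C1, C2, FC>`):
// let (pp, vp) = FS::preprocess(&prep_param)?;
// let mut fs = FS::init(&pp, f_circuit, z_0.clone())?;
// for _ in 0..n { fs.prove_step()?; }
// let (running, incoming, cyclefold) = fs.instances();
// FS::verify(vp, z_0, fs.state(), C1::ScalarField::from(n), running, incoming, cyclefold)?;
// A `Decider` can then compress the final folded state into a single proof via
// `D::prove(&pp_d, rng, fs)?`, checked with `D::verify(..)`.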

View File

@@ -0,0 +1,34 @@
use crate::Error;
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::fmt::Debug;
pub mod poseidon;
pub trait Transcript<C: CurveGroup> {
type TranscriptConfig: Debug;
fn new(config: &Self::TranscriptConfig) -> Self;
fn absorb(&mut self, v: &C::ScalarField);
fn absorb_vec(&mut self, v: &[C::ScalarField]);
fn absorb_point(&mut self, v: &C) -> Result<(), Error>;
fn get_challenge(&mut self) -> C::ScalarField;
/// get_challenge_nbits returns a field element of size nbits
fn get_challenge_nbits(&mut self, nbits: usize) -> Vec<bool>;
fn get_challenges(&mut self, n: usize) -> Vec<C::ScalarField>;
}
pub trait TranscriptVar<F: PrimeField> {
type TranscriptVarConfig: Debug;
fn new(cs: ConstraintSystemRef<F>, poseidon_config: &Self::TranscriptVarConfig) -> Self;
fn absorb(&mut self, v: FpVar<F>) -> Result<(), SynthesisError>;
fn absorb_vec(&mut self, v: &[FpVar<F>]) -> Result<(), SynthesisError>;
fn get_challenge(&mut self) -> Result<FpVar<F>, SynthesisError>;
/// returns the bit representation of the challenge, we use its output in-circuit for the
/// `GC.scalar_mul_le` method.
fn get_challenge_nbits(&mut self, nbits: usize) -> Result<Vec<Boolean<F>>, SynthesisError>;
fn get_challenges(&mut self, n: usize) -> Result<Vec<FpVar<F>>, SynthesisError>;
}
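A minimal generic sketch (an illustration, not part of the file) of the absorb-then-squeeze pattern the `Transcript` trait encodes:
fn challenge_after_absorb<C: CurveGroup, T: Transcript<C>>(
    transcript: &mut T,
    v: &C::ScalarField,
) -> C::ScalarField {
    // bind the value into the transcript, then derive a challenge from it
    transcript.absorb(v);
    transcript.get_challenge()
}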

View File

@@ -0,0 +1,219 @@
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::{BigInteger, Field, PrimeField};
use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::{One, Zero};
use crate::transcript::Transcript;
use crate::Error;
use super::TranscriptVar;
/// PoseidonTranscript implements the Transcript trait using the Poseidon hash
pub struct PoseidonTranscript<C: CurveGroup>
where
<C as Group>::ScalarField: Absorb,
{
sponge: PoseidonSponge<C::ScalarField>,
}
impl<C: CurveGroup> Transcript<C> for PoseidonTranscript<C>
where
<C as Group>::ScalarField: Absorb,
{
type TranscriptConfig = PoseidonConfig<C::ScalarField>;
fn new(poseidon_config: &Self::TranscriptConfig) -> Self {
let sponge = PoseidonSponge::<C::ScalarField>::new(poseidon_config);
Self { sponge }
}
fn absorb(&mut self, v: &C::ScalarField) {
self.sponge.absorb(&v);
}
fn absorb_vec(&mut self, v: &[C::ScalarField]) {
self.sponge.absorb(&v);
}
fn absorb_point(&mut self, p: &C) -> Result<(), Error> {
self.sponge.absorb(&prepare_point(p)?);
Ok(())
}
fn get_challenge(&mut self) -> C::ScalarField {
let c = self.sponge.squeeze_field_elements(1);
self.sponge.absorb(&c[0]);
c[0]
}
fn get_challenge_nbits(&mut self, nbits: usize) -> Vec<bool> {
self.sponge.squeeze_bits(nbits)
}
fn get_challenges(&mut self, n: usize) -> Vec<C::ScalarField> {
let c = self.sponge.squeeze_field_elements(n);
self.sponge.absorb(&c);
c
}
}
// Returns the point coordinates in Fr, so they can be absorbed by the transcript. It does not
// work over bytes, so that the same logic can be reproduced in-circuit.
fn prepare_point<C: CurveGroup>(p: &C) -> Result<Vec<C::ScalarField>, Error> {
let affine = p.into_affine();
let zero_point = (&C::BaseField::zero(), &C::BaseField::one());
let xy = affine.xy().unwrap_or(zero_point);
let x_bi =
xy.0.to_base_prime_field_elements()
.next()
.expect("a")
.into_bigint();
let y_bi =
xy.1.to_base_prime_field_elements()
.next()
.expect("a")
.into_bigint();
Ok(vec![
C::ScalarField::from_le_bytes_mod_order(x_bi.to_bytes_le().as_ref()),
C::ScalarField::from_le_bytes_mod_order(y_bi.to_bytes_le().as_ref()),
])
}
/// PoseidonTranscriptVar implements the gadget compatible with PoseidonTranscript
pub struct PoseidonTranscriptVar<F: PrimeField> {
sponge: PoseidonSpongeVar<F>,
}
impl<F: PrimeField> TranscriptVar<F> for PoseidonTranscriptVar<F> {
type TranscriptVarConfig = PoseidonConfig<F>;
fn new(cs: ConstraintSystemRef<F>, poseidon_config: &Self::TranscriptVarConfig) -> Self {
let sponge = PoseidonSpongeVar::<F>::new(cs, poseidon_config);
Self { sponge }
}
fn absorb(&mut self, v: FpVar<F>) -> Result<(), SynthesisError> {
self.sponge.absorb(&v)
}
fn absorb_vec(&mut self, v: &[FpVar<F>]) -> Result<(), SynthesisError> {
self.sponge.absorb(&v)
}
fn get_challenge(&mut self) -> Result<FpVar<F>, SynthesisError> {
let c = self.sponge.squeeze_field_elements(1)?;
self.sponge.absorb(&c[0])?;
Ok(c[0].clone())
}
/// returns the bit representation of the challenge, we use its output in-circuit for the
/// `GC.scalar_mul_le` method.
fn get_challenge_nbits(&mut self, nbits: usize) -> Result<Vec<Boolean<F>>, SynthesisError> {
self.sponge.squeeze_bits(nbits)
}
fn get_challenges(&mut self, n: usize) -> Result<Vec<FpVar<F>>, SynthesisError> {
let c = self.sponge.squeeze_field_elements(n)?;
self.sponge.absorb(&c)?;
Ok(c)
}
}
/// WARNING: the method poseidon_test_config is for tests only
pub fn poseidon_test_config<F: PrimeField>() -> PoseidonConfig<F> {
let full_rounds = 8;
let partial_rounds = 31;
let alpha = 5;
let rate = 2;
let (ark, mds) = ark_crypto_primitives::sponge::poseidon::find_poseidon_ark_and_mds::<F>(
F::MODULUS_BIT_SIZE as u64,
rate,
full_rounds,
partial_rounds,
0,
);
PoseidonConfig::new(
full_rounds as usize,
partial_rounds as usize,
alpha,
mds,
ark,
rate,
1,
)
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, groups::CurveVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_vesta::Projective as E2Projective;
use std::ops::Mul;
#[test]
fn test_transcript_and_transcriptvar_get_challenge() {
// use 'native' transcript
let config = poseidon_test_config::<Fr>();
let mut tr = PoseidonTranscript::<Projective>::new(&config);
tr.absorb(&Fr::from(42_u32));
let c = tr.get_challenge();
// use 'gadget' transcript
let cs = ConstraintSystem::<Fr>::new_ref();
let mut tr_var = PoseidonTranscriptVar::<Fr>::new(cs.clone(), &config);
let v = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(42_u32))).unwrap();
tr_var.absorb(v).unwrap();
let c_var = tr_var.get_challenge().unwrap();
// assert that native & gadget transcripts return the same challenge
assert_eq!(c, c_var.value().unwrap());
}
#[test]
fn test_transcript_and_transcriptvar_nbits() {
let nbits = crate::constants::N_BITS_RO;
// use 'native' transcript
let config = poseidon_test_config::<Fq>();
let mut tr = PoseidonTranscript::<E2Projective>::new(&config);
tr.absorb(&Fq::from(42_u32));
// get challenge from native transcript
let c_bits = tr.get_challenge_nbits(nbits);
// use 'gadget' transcript
let cs = ConstraintSystem::<Fq>::new_ref();
let mut tr_var = PoseidonTranscriptVar::<Fq>::new(cs.clone(), &config);
let v = FpVar::<Fq>::new_witness(cs.clone(), || Ok(Fq::from(42_u32))).unwrap();
tr_var.absorb(v).unwrap();
// get challenge from circuit transcript
let c_var = tr_var.get_challenge_nbits(nbits).unwrap();
let P = Projective::generator();
let PVar = GVar::new_witness(cs.clone(), || Ok(P)).unwrap();
// multiply point P by the challenge in different formats, to ensure that we get the same
// result natively and in-circuit
// native c*P
let c_Fr = Fr::from_bigint(BigInteger::from_bits_le(&c_bits)).unwrap();
let cP_native = P.mul(c_Fr);
// native c*P using mul_bits_be (notice the .rev to convert the LE to BE)
let cP_native_bits = P.mul_bits_be(c_bits.into_iter().rev());
// in-circuit c*P using scalar_mul_le
let cPVar = PVar.scalar_mul_le(c_var.iter()).unwrap();
// check that they are equal
assert_eq!(
cP_native.into_affine(),
cPVar.value().unwrap().into_affine()
);
assert_eq!(
cP_native_bits.into_affine(),
cPVar.value().unwrap().into_affine()
);
}
}

View File

@@ -0,0 +1,9 @@
pub fn bit_decompose(input: u64, n: usize) -> Vec<bool> {
let mut res = Vec::with_capacity(n);
let mut i = input;
for _ in 0..n {
res.push(i & 1 == 1);
i >>= 1;
}
res
}
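A usage note (sketch): the output is little-endian, least significant bit first.
// bit_decompose(5, 4): 5 = 0b0101, so the result is
// vec![true, false, true, false]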

View File

@@ -0,0 +1,3 @@
pub mod multilinear_polynomial;
pub mod sum_check;
pub mod virtual_polynomial;

View File

@@ -0,0 +1,200 @@
// code forked from
// https://github.com/EspressoSystems/hyperplonk/blob/main/arithmetic/src/multilinear_polynomial.rs
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
use ark_ff::Field;
#[cfg(feature = "parallel")]
use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
pub use ark_poly::DenseMultilinearExtension;
pub fn fix_variables<F: Field>(
poly: &DenseMultilinearExtension<F>,
partial_point: &[F],
) -> DenseMultilinearExtension<F> {
assert!(
partial_point.len() <= poly.num_vars,
"invalid size of partial point"
);
let nv = poly.num_vars;
let mut poly = poly.evaluations.to_vec();
let dim = partial_point.len();
// evaluate single variable of partial point from left to right
for (i, point) in partial_point.iter().enumerate().take(dim) {
poly = fix_one_variable_helper(&poly, nv - i, point);
}
DenseMultilinearExtension::<F>::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
}
fn fix_one_variable_helper<F: Field>(data: &[F], nv: usize, point: &F) -> Vec<F> {
let mut res = vec![F::zero(); 1 << (nv - 1)];
// evaluate single variable of partial point from left to right
#[cfg(not(feature = "parallel"))]
for i in 0..(1 << (nv - 1)) {
res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point;
}
#[cfg(feature = "parallel")]
res.par_iter_mut().enumerate().for_each(|(i, x)| {
*x = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point;
});
res
}
pub fn evaluate_no_par<F: Field>(poly: &DenseMultilinearExtension<F>, point: &[F]) -> F {
assert_eq!(poly.num_vars, point.len());
fix_variables_no_par(poly, point).evaluations[0]
}
fn fix_variables_no_par<F: Field>(
poly: &DenseMultilinearExtension<F>,
partial_point: &[F],
) -> DenseMultilinearExtension<F> {
assert!(
partial_point.len() <= poly.num_vars,
"invalid size of partial point"
);
let nv = poly.num_vars;
let mut poly = poly.evaluations.to_vec();
let dim = partial_point.len();
// evaluate single variable of partial point from left to right
for i in 1..dim + 1 {
let r = partial_point[i - 1];
for b in 0..(1 << (nv - i)) {
poly[b] = poly[b << 1] + (poly[(b << 1) + 1] - poly[b << 1]) * r;
}
}
DenseMultilinearExtension::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
}
/// Given a multilinear polynomial `p(x)` and a scalar `s`, compute `s*p(x)`
pub fn scalar_mul<F: Field>(
poly: &DenseMultilinearExtension<F>,
s: &F,
) -> DenseMultilinearExtension<F> {
DenseMultilinearExtension {
evaluations: poly.evaluations.iter().map(|e| *e * s).collect(),
num_vars: poly.num_vars,
}
}
/// Test-only methods used in virtual_polynomial.rs
#[cfg(test)]
pub mod tests {
use super::*;
use ark_ff::PrimeField;
use ark_std::rand::RngCore;
use ark_std::{end_timer, start_timer};
use std::sync::Arc;
pub fn fix_last_variables<F: PrimeField>(
poly: &DenseMultilinearExtension<F>,
partial_point: &[F],
) -> DenseMultilinearExtension<F> {
assert!(
partial_point.len() <= poly.num_vars,
"invalid size of partial point"
);
let nv = poly.num_vars;
let mut poly = poly.evaluations.to_vec();
let dim = partial_point.len();
// evaluate single variable of partial point from left to right
for (i, point) in partial_point.iter().rev().enumerate().take(dim) {
poly = fix_last_variable_helper(&poly, nv - i, point);
}
DenseMultilinearExtension::<F>::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
}
fn fix_last_variable_helper<F: Field>(data: &[F], nv: usize, point: &F) -> Vec<F> {
let half_len = 1 << (nv - 1);
let mut res = vec![F::zero(); half_len];
// evaluate single variable of partial point from left to right
#[cfg(not(feature = "parallel"))]
for b in 0..half_len {
res[b] = data[b] + (data[b + half_len] - data[b]) * point;
}
#[cfg(feature = "parallel")]
res.par_iter_mut().enumerate().for_each(|(i, x)| {
*x = data[i] + (data[i + half_len] - data[i]) * point;
});
res
}
/// Sample a random list of multilinear polynomials.
/// Returns
/// - the list of polynomials,
/// - the sum of their evaluations over the boolean hypercube.
#[cfg(test)]
pub fn random_mle_list<F: PrimeField, R: RngCore>(
nv: usize,
degree: usize,
rng: &mut R,
) -> (Vec<Arc<DenseMultilinearExtension<F>>>, F) {
let start = start_timer!(|| "sample random mle list");
let mut multiplicands = Vec::with_capacity(degree);
for _ in 0..degree {
multiplicands.push(Vec::with_capacity(1 << nv))
}
let mut sum = F::zero();
for _ in 0..(1 << nv) {
let mut product = F::one();
for e in multiplicands.iter_mut() {
let val = F::rand(rng);
e.push(val);
product *= val;
}
sum += product;
}
let list = multiplicands
.into_iter()
.map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
.collect();
end_timer!(start);
(list, sum)
}
// Build a randomized list of MLEs whose sum is zero.
#[cfg(test)]
pub fn random_zero_mle_list<F: PrimeField, R: RngCore>(
nv: usize,
degree: usize,
rng: &mut R,
) -> Vec<Arc<DenseMultilinearExtension<F>>> {
let start = start_timer!(|| "sample random zero mle list");
let mut multiplicands = Vec::with_capacity(degree);
for _ in 0..degree {
multiplicands.push(Vec::with_capacity(1 << nv))
}
for _ in 0..(1 << nv) {
multiplicands[0].push(F::zero());
for e in multiplicands.iter_mut().skip(1) {
e.push(F::rand(rng));
}
}
let list = multiplicands
.into_iter()
.map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
.collect();
end_timer!(start);
list
}
}
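A worked sketch (illustrative, appended here for clarity) of what `fix_variables` does on a 2-variate MLE, matching `fix_one_variable_helper` above:
// evaluations are indexed with the first variable in the lowest bit:
// f = [f(0,0), f(1,0), f(0,1), f(1,1)]
// fix_variables(&f, &[r]) -> [f(r,0), f(r,1)], where each entry is computed as
// f(0,b) + r * (f(1,b) - f(0,b))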

View File

@@ -0,0 +1,252 @@
// code forked from:
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
//! This module implements the sum check protocol.
use crate::{
transcript::Transcript,
utils::virtual_polynomial::{VPAuxInfo, VirtualPolynomial},
};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_poly::univariate::DensePolynomial;
use ark_poly::{DenseMultilinearExtension, DenseUVPolynomial, Polynomial};
use ark_std::{end_timer, start_timer};
use std::{fmt::Debug, marker::PhantomData, sync::Arc};
use crate::utils::sum_check::structs::IOPProverMessage;
use crate::utils::sum_check::structs::IOPVerifierState;
use ark_ff::Field;
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
use structs::{IOPProof, IOPProverState};
mod prover;
pub mod structs;
pub mod verifier;
/// A generic sum-check trait over a curve group
pub trait SumCheck<C: CurveGroup> {
type VirtualPolynomial;
type VPAuxInfo;
type MultilinearExtension;
type SumCheckProof: Clone + Debug + Default + PartialEq;
type SumCheckSubClaim: Clone + Debug + Default + PartialEq;
/// Extract sum from the proof
fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField;
/// Generate proof of the sum of polynomial over {0,1}^`num_vars`
///
/// The polynomial is represented in the form of a VirtualPolynomial.
fn prove(
poly: &Self::VirtualPolynomial,
transcript: &mut impl Transcript<C>,
) -> Result<Self::SumCheckProof, PolyIOPErrors>;
/// Verify the claimed sum using the proof
fn verify(
sum: C::ScalarField,
proof: &Self::SumCheckProof,
aux_info: &Self::VPAuxInfo,
transcript: &mut impl Transcript<C>,
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
}
/// Trait for sum check protocol prover side APIs.
pub trait SumCheckProver<C: CurveGroup>
where
Self: Sized,
{
type VirtualPolynomial;
type ProverMessage;
/// Initialize the prover state to argue for the sum of the input polynomial
/// over {0,1}^`num_vars`.
fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors>;
/// Receive message from verifier, generate prover message, and proceed to
/// next round.
///
/// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
fn prove_round_and_update_state(
&mut self,
challenge: &Option<C::ScalarField>,
) -> Result<Self::ProverMessage, PolyIOPErrors>;
}
/// Trait for sum check protocol verifier side APIs.
pub trait SumCheckVerifier<C: CurveGroup> {
type VPAuxInfo;
type ProverMessage;
type Challenge;
type SumCheckSubClaim;
/// Initialize the verifier's state.
fn verifier_init(index_info: &Self::VPAuxInfo) -> Self;
/// Run verifier for the current round, given a prover message.
///
/// Note that `verify_round_and_update_state` only samples and stores
/// challenges and updates the verifier's state accordingly. The actual
/// verifications are deferred (in batch) to `check_and_generate_subclaim`
/// at the last step.
fn verify_round_and_update_state(
&mut self,
prover_msg: &Self::ProverMessage,
transcript: &mut impl Transcript<C>,
) -> Result<Self::Challenge, PolyIOPErrors>;
/// This function verifies the deferred checks in the interactive version of
/// the protocol; and generate the subclaim. Returns an error if the
/// proof failed to verify.
///
/// If the asserted sum is correct, then the multilinear polynomial
/// evaluated at `subclaim.point` will be `subclaim.expected_evaluation`.
/// Otherwise, it is highly unlikely that those two will be equal.
/// Larger field size guarantees smaller soundness error.
fn check_and_generate_subclaim(
&self,
asserted_sum: &C::ScalarField,
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
}
/// A SumCheckSubClaim is a claim generated by the verifier at the end of
/// verification when it is convinced.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SumCheckSubClaim<F: PrimeField> {
/// the multi-dimensional point that this multilinear extension is evaluated
/// to
pub point: Vec<F>,
/// the expected evaluation
pub expected_evaluation: F,
}
#[derive(Clone, Debug, Default, Copy, PartialEq, Eq)]
pub struct IOPSumCheck<C: CurveGroup, T: Transcript<C>> {
#[doc(hidden)]
phantom: PhantomData<C>,
#[doc(hidden)]
phantom2: PhantomData<T>,
}
impl<C: CurveGroup, T: Transcript<C>> SumCheck<C> for IOPSumCheck<C, T> {
type SumCheckProof = IOPProof<C::ScalarField>;
type VirtualPolynomial = VirtualPolynomial<C::ScalarField>;
type VPAuxInfo = VPAuxInfo<C::ScalarField>;
type MultilinearExtension = Arc<DenseMultilinearExtension<C::ScalarField>>;
type SumCheckSubClaim = SumCheckSubClaim<C::ScalarField>;
fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField {
let start = start_timer!(|| "extract sum");
let poly = DensePolynomial::from_coefficients_vec(proof.proofs[0].coeffs.clone());
let res = poly.evaluate(&C::ScalarField::ONE) + poly.evaluate(&C::ScalarField::ZERO);
end_timer!(start);
res
}
fn prove(
poly: &VirtualPolynomial<C::ScalarField>,
transcript: &mut impl Transcript<C>,
) -> Result<IOPProof<C::ScalarField>, PolyIOPErrors> {
transcript.absorb(&C::ScalarField::from(poly.aux_info.num_variables as u64));
transcript.absorb(&C::ScalarField::from(poly.aux_info.max_degree as u64));
let mut prover_state: IOPProverState<C> = IOPProverState::prover_init(poly)?;
let mut challenge: Option<C::ScalarField> = None;
let mut prover_msgs: Vec<IOPProverMessage<C::ScalarField>> =
Vec::with_capacity(poly.aux_info.num_variables);
for _ in 0..poly.aux_info.num_variables {
let prover_msg: IOPProverMessage<C::ScalarField> =
IOPProverState::prove_round_and_update_state(&mut prover_state, &challenge)?;
transcript.absorb_vec(&prover_msg.coeffs);
prover_msgs.push(prover_msg);
challenge = Some(transcript.get_challenge());
}
if let Some(p) = challenge {
prover_state.challenges.push(p)
};
Ok(IOPProof {
point: prover_state.challenges,
proofs: prover_msgs,
})
}
fn verify(
claimed_sum: C::ScalarField,
proof: &IOPProof<C::ScalarField>,
aux_info: &VPAuxInfo<C::ScalarField>,
transcript: &mut impl Transcript<C>,
) -> Result<SumCheckSubClaim<C::ScalarField>, PolyIOPErrors> {
transcript.absorb(&C::ScalarField::from(aux_info.num_variables as u64));
transcript.absorb(&C::ScalarField::from(aux_info.max_degree as u64));
let mut verifier_state = IOPVerifierState::verifier_init(aux_info);
for i in 0..aux_info.num_variables {
let prover_msg = proof.proofs.get(i).expect("proof is incomplete");
transcript.absorb_vec(&prover_msg.coeffs);
IOPVerifierState::verify_round_and_update_state(
&mut verifier_state,
prover_msg,
transcript,
)?;
}
IOPVerifierState::check_and_generate_subclaim(&verifier_state, &claimed_sum)
}
}
#[cfg(test)]
pub mod tests {
use std::sync::Arc;
use ark_ff::Field;
use ark_pallas::Fr;
use ark_pallas::Projective;
use ark_poly::DenseMultilinearExtension;
use ark_poly::MultilinearExtension;
use ark_std::test_rng;
use crate::transcript::poseidon::poseidon_test_config;
use crate::transcript::poseidon::PoseidonTranscript;
use crate::transcript::Transcript;
use crate::utils::sum_check::SumCheck;
use crate::utils::virtual_polynomial::VirtualPolynomial;
use super::IOPSumCheck;
#[test]
pub fn sumcheck_poseidon() {
let mut rng = test_rng();
let poly_mle = DenseMultilinearExtension::rand(5, &mut rng);
let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE);
let poseidon_config = poseidon_test_config::<Fr>();
// sum-check prove
let mut poseidon_transcript_prove: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
let sum_check = IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::prove(
&virtual_poly,
&mut poseidon_transcript_prove,
)
.unwrap();
// sum-check verify
let claimed_sum =
IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::extract_sum(&sum_check);
let mut poseidon_transcript_verify: PoseidonTranscript<Projective> =
PoseidonTranscript::<Projective>::new(&poseidon_config);
let res_verify = IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::verify(
claimed_sum,
&sum_check,
&virtual_poly.aux_info,
&mut poseidon_transcript_verify,
);
assert!(res_verify.is_ok());
}
}

View File

@@ -0,0 +1,228 @@
// code forked from:
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
//! Prover subroutines for a SumCheck protocol.
use super::SumCheckProver;
use crate::utils::{
lagrange_poly::compute_lagrange_interpolated_poly, multilinear_polynomial::fix_variables,
virtual_polynomial::VirtualPolynomial,
};
use ark_ec::CurveGroup;
use ark_ff::Field;
use ark_ff::{batch_inversion, PrimeField};
use ark_poly::DenseMultilinearExtension;
use ark_std::{cfg_into_iter, end_timer, start_timer, vec::Vec};
use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator};
use std::sync::Arc;
use super::structs::{IOPProverMessage, IOPProverState};
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
// #[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
impl<C: CurveGroup> SumCheckProver<C> for IOPProverState<C> {
type VirtualPolynomial = VirtualPolynomial<C::ScalarField>;
type ProverMessage = IOPProverMessage<C::ScalarField>;
/// Initialize the prover state to argue for the sum of the input polynomial
/// over {0,1}^`num_vars`.
fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors> {
let start = start_timer!(|| "sum check prover init");
if polynomial.aux_info.num_variables == 0 {
return Err(PolyIOPErrors::InvalidParameters(
"Attempt to prove a constant.".to_string(),
));
}
end_timer!(start);
Ok(Self {
challenges: Vec::with_capacity(polynomial.aux_info.num_variables),
round: 0,
poly: polynomial.clone(),
extrapolation_aux: (1..polynomial.aux_info.max_degree)
.map(|degree| {
let points = (0..1 + degree as u64)
.map(C::ScalarField::from)
.collect::<Vec<_>>();
let weights = barycentric_weights(&points);
(points, weights)
})
.collect(),
})
}
/// Receive message from verifier, generate prover message, and proceed to
/// next round.
///
/// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
fn prove_round_and_update_state(
&mut self,
challenge: &Option<C::ScalarField>,
) -> Result<Self::ProverMessage, PolyIOPErrors> {
// let start =
// start_timer!(|| format!("sum check prove {}-th round and update state",
// self.round));
if self.round >= self.poly.aux_info.num_variables {
return Err(PolyIOPErrors::InvalidProver(
"Prover is not active".to_string(),
));
}
// let fix_argument = start_timer!(|| "fix argument");
// Step 1:
// fix argument and evaluate f(x) over x_m = r; where r is the challenge
// for the current round, and m is the round number, indexed from 1
//
// i.e.:
// at round m <= n, for each mle g(x_1, ... x_n) within the flattened_mle
// which has already been evaluated to
//
// g(r_1, ..., r_{m-1}, x_m ... x_n)
//
// eval g over r_m, and mutate g to g(r_1, ..., r_m, x_{m+1}, ..., x_n)
let mut flattened_ml_extensions: Vec<DenseMultilinearExtension<C::ScalarField>> = self
.poly
.flattened_ml_extensions
.par_iter()
.map(|x| x.as_ref().clone())
.collect();
if let Some(chal) = challenge {
if self.round == 0 {
return Err(PolyIOPErrors::InvalidProver(
"first round should be prover first.".to_string(),
));
}
self.challenges.push(*chal);
let r = self.challenges[self.round - 1];
// #[cfg(feature = "parallel")]
flattened_ml_extensions
.par_iter_mut()
.for_each(|mle| *mle = fix_variables(mle, &[r]));
// #[cfg(not(feature = "parallel"))]
// flattened_ml_extensions
// .iter_mut()
// .for_each(|mle| *mle = fix_variables(mle, &[r]));
} else if self.round > 0 {
return Err(PolyIOPErrors::InvalidProver(
"verifier message is empty".to_string(),
));
}
// end_timer!(fix_argument);
self.round += 1;
let products_list = self.poly.products.clone();
let mut products_sum = vec![C::ScalarField::ZERO; self.poly.aux_info.max_degree + 1];
// Step 2: generate sum for the partial evaluated polynomial:
// f(r_1, ..., r_m, x_{m+1}, ..., x_n)
products_list.iter().for_each(|(coefficient, products)| {
let mut sum = cfg_into_iter!(0..1 << (self.poly.aux_info.num_variables - self.round))
.fold(
|| {
(
vec![(C::ScalarField::ZERO, C::ScalarField::ZERO); products.len()],
vec![C::ScalarField::ZERO; products.len() + 1],
)
},
|(mut buf, mut acc), b| {
buf.iter_mut()
.zip(products.iter())
.for_each(|((eval, step), f)| {
let table = &flattened_ml_extensions[*f];
*eval = table[b << 1];
*step = table[(b << 1) + 1] - table[b << 1];
});
acc[0] += buf.iter().map(|(eval, _)| eval).product::<C::ScalarField>();
acc[1..].iter_mut().for_each(|acc| {
buf.iter_mut().for_each(|(eval, step)| *eval += step as &_);
*acc += buf.iter().map(|(eval, _)| eval).product::<C::ScalarField>();
});
(buf, acc)
},
)
.map(|(_, partial)| partial)
.reduce(
|| vec![C::ScalarField::ZERO; products.len() + 1],
|mut sum, partial| {
sum.iter_mut()
.zip(partial.iter())
.for_each(|(sum, partial)| *sum += partial);
sum
},
);
sum.iter_mut().for_each(|sum| *sum *= coefficient);
let extrapolation = cfg_into_iter!(0..self.poly.aux_info.max_degree - products.len())
.map(|i| {
let (points, weights) = &self.extrapolation_aux[products.len() - 1];
let at = C::ScalarField::from((products.len() + 1 + i) as u64);
extrapolate(points, weights, &sum, &at)
})
.collect::<Vec<_>>();
products_sum
.iter_mut()
.zip(sum.iter().chain(extrapolation.iter()))
.for_each(|(products_sum, sum)| *products_sum += sum);
});
// update prover's state to the partial evaluated polynomial
self.poly.flattened_ml_extensions = flattened_ml_extensions
.par_iter()
.map(|x| Arc::new(x.clone()))
.collect();
let prover_poly = compute_lagrange_interpolated_poly::<C::ScalarField>(&products_sum);
Ok(IOPProverMessage {
coeffs: prover_poly.coeffs,
})
}
}
#[allow(clippy::filter_map_bool_then)]
fn barycentric_weights<F: PrimeField>(points: &[F]) -> Vec<F> {
let mut weights = points
.iter()
.enumerate()
.map(|(j, point_j)| {
points
.iter()
.enumerate()
.filter_map(|(i, point_i)| (i != j).then(|| *point_j - point_i))
.reduce(|acc, value| acc * value)
.unwrap_or_else(F::one)
})
.collect::<Vec<_>>();
batch_inversion(&mut weights);
weights
}
fn extrapolate<F: PrimeField>(points: &[F], weights: &[F], evals: &[F], at: &F) -> F {
let (coeffs, sum_inv) = {
let mut coeffs = points.iter().map(|point| *at - point).collect::<Vec<_>>();
batch_inversion(&mut coeffs);
coeffs.iter_mut().zip(weights).for_each(|(coeff, weight)| {
*coeff *= weight;
});
let sum_inv = coeffs.iter().sum::<F>().inverse().unwrap_or_default();
(coeffs, sum_inv)
};
coeffs
.iter()
.zip(evals)
.map(|(coeff, eval)| *coeff * eval)
.sum::<F>()
* sum_inv
}
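A worked sketch of `extrapolate` with illustrative values: for points [0, 1], `barycentric_weights` returns [-1, 1] (w_j = 1/∏_{i≠j}(x_j - x_i)); extrapolating the evaluations [e0, e1] of a degree-1 polynomial to at = 2:
// coeffs   = [1/(2-0), 1/(2-1)] = [1/2, 1]
// weighted = [1/2 * (-1), 1 * 1] = [-1/2, 1], sum = 1/2, so sum_inv = 2
// result   = 2 * (-e0/2 + e1) = 2*e1 - e0,
// which is exactly the line through (0, e0) and (1, e1) evaluated at x = 2.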

View File

@@ -0,0 +1,60 @@
// code forked from:
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
//! This module defines structs that are shared by all sub protocols.
use crate::utils::virtual_polynomial::VirtualPolynomial;
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
/// An IOP proof is a collection of
/// - messages from prover to verifier at each round through the interactive
/// protocol.
/// - a point that is generated by the transcript for evaluation
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct IOPProof<F: PrimeField> {
pub point: Vec<F>,
pub proofs: Vec<IOPProverMessage<F>>,
}
/// A message from the prover to the verifier at a given round
/// is a list of coeffs.
#[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)]
pub struct IOPProverMessage<F: PrimeField> {
pub(crate) coeffs: Vec<F>,
}
/// Prover State of a PolyIOP.
#[derive(Debug)]
pub struct IOPProverState<C: CurveGroup> {
/// sampled randomness given by the verifier
pub challenges: Vec<C::ScalarField>,
/// the current round number
pub(crate) round: usize,
/// pointer to the virtual polynomial
pub(crate) poly: VirtualPolynomial<C::ScalarField>,
/// points with precomputed barycentric weights for extrapolating smaller
/// degree uni-polys to `max_degree + 1` evaluations.
#[allow(clippy::type_complexity)]
pub(crate) extrapolation_aux: Vec<(Vec<C::ScalarField>, Vec<C::ScalarField>)>,
}
/// Verifier State of a PolyIOP, generic over a curve group
#[derive(Debug)]
pub struct IOPVerifierState<C: CurveGroup> {
pub(crate) round: usize,
pub(crate) num_vars: usize,
pub(crate) finished: bool,
/// a list storing the univariate polynomial in evaluation form sent by the
/// prover at each round
pub(crate) polynomials_received: Vec<Vec<C::ScalarField>>,
/// a list storing the randomness sampled by the verifier at each round
pub(crate) challenges: Vec<C::ScalarField>,
}

View File

@@ -0,0 +1,300 @@
// code forked from:
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
//! Verifier subroutines for a SumCheck protocol.
use super::{
structs::{IOPProverMessage, IOPVerifierState},
SumCheckSubClaim, SumCheckVerifier,
};
use crate::{transcript::Transcript, utils::virtual_polynomial::VPAuxInfo};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_poly::Polynomial;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
use ark_std::{end_timer, start_timer};
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
#[cfg(feature = "parallel")]
use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
impl<C: CurveGroup> SumCheckVerifier<C> for IOPVerifierState<C> {
type VPAuxInfo = VPAuxInfo<C::ScalarField>;
type ProverMessage = IOPProverMessage<C::ScalarField>;
type Challenge = C::ScalarField;
type SumCheckSubClaim = SumCheckSubClaim<C::ScalarField>;
/// Initialize the verifier's state.
fn verifier_init(index_info: &Self::VPAuxInfo) -> Self {
let start = start_timer!(|| "sum check verifier init");
let res = Self {
round: 1,
num_vars: index_info.num_variables,
finished: false,
polynomials_received: Vec::with_capacity(index_info.num_variables),
challenges: Vec::with_capacity(index_info.num_variables),
};
end_timer!(start);
res
}
fn verify_round_and_update_state(
&mut self,
prover_msg: &<IOPVerifierState<C> as SumCheckVerifier<C>>::ProverMessage,
transcript: &mut impl Transcript<C>,
) -> Result<<IOPVerifierState<C> as SumCheckVerifier<C>>::Challenge, PolyIOPErrors> {
let start =
start_timer!(|| format!("sum check verify {}-th round and update state", self.round));
if self.finished {
return Err(PolyIOPErrors::InvalidVerifier(
"Incorrect verifier state: Verifier is already finished.".to_string(),
));
}
// In an interactive protocol, the verifier should
//
// 1. check if the received `P(0) + P(1) = expected`.
// 2. set `expected` to `P(r)`.
//
// When we turn the protocol to a non-interactive one, it is sufficient to defer
// such checks to `check_and_generate_subclaim` after the last round.
let challenge = transcript.get_challenge();
self.challenges.push(challenge);
self.polynomials_received.push(prover_msg.coeffs.to_vec());
if self.round == self.num_vars {
// accept and close
self.finished = true;
} else {
// proceed to the next round
self.round += 1;
}
end_timer!(start);
Ok(challenge)
}
fn check_and_generate_subclaim(
&self,
asserted_sum: &C::ScalarField,
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors> {
let start = start_timer!(|| "sum check check and generate subclaim");
if !self.finished {
return Err(PolyIOPErrors::InvalidVerifier(
"Incorrect verifier state: Verifier has not finished.".to_string(),
));
}
if self.polynomials_received.len() != self.num_vars {
return Err(PolyIOPErrors::InvalidVerifier(
"insufficient rounds".to_string(),
));
}
// the deferred check during the interactive phase:
// 2. set `expected` to `P(r)`.
#[cfg(feature = "parallel")]
let mut expected_vec = self
.polynomials_received
.clone()
.into_par_iter()
.zip(self.challenges.clone().into_par_iter())
.map(|(coeffs, challenge)| {
// Removed check on number of evaluations here since verifier receives polynomial in coeffs form
let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs);
Ok(prover_poly.evaluate(&challenge))
})
.collect::<Result<Vec<_>, PolyIOPErrors>>()?;
#[cfg(not(feature = "parallel"))]
let mut expected_vec = self
.polynomials_received
.clone()
.into_iter()
.zip(self.challenges.clone().into_iter())
.map(|(coeffs, challenge)| {
// Removed check on number of evaluations here since verifier receives polynomial in coeffs form
let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs);
Ok(prover_poly.evaluate(&challenge))
})
.collect::<Result<Vec<_>, PolyIOPErrors>>()?;
// insert the asserted_sum to the first position of the expected vector
expected_vec.insert(0, *asserted_sum);
for (coeffs, &expected) in self
.polynomials_received
.iter()
.zip(expected_vec.iter())
.take(self.num_vars)
{
let poly = DensePolynomial::from_coefficients_slice(coeffs);
let eval_at_one: C::ScalarField = poly.iter().sum();
let eval_at_zero: C::ScalarField = poly.coeffs[0];
let eval = eval_at_one + eval_at_zero;
println!("evaluations: {:?}, expected: {:?}", eval, expected);
// the deferred check during the interactive phase:
// 1. check if the received `P(0) + P(1) = expected`.
if eval != expected {
return Err(PolyIOPErrors::InvalidProof(
"Prover message is not consistent with the claim.".to_string(),
));
}
}
end_timer!(start);
Ok(SumCheckSubClaim {
point: self.challenges.clone(),
// the last expected value (not checked within this function) will be included in the
// subclaim
expected_evaluation: expected_vec[self.num_vars],
})
}
}
/// Interpolate a uni-variate degree-`p_i.len()-1` polynomial and evaluate this
/// polynomial at `eval_at`:
///
/// \sum_{i=0}^{len-1} p_i \cdot \prod_{j \neq i} (eval_at - j)/(i - j)
///
/// This implementation is linear in number of inputs in terms of field
/// operations. It also has a quadratic term in primitive operations which is
/// negligible compared to field operations.
/// TODO: The quadratic term can be removed by precomputing the lagrange
/// coefficients.
pub fn interpolate_uni_poly<F: PrimeField>(p_i: &[F], eval_at: F) -> Result<F, PolyIOPErrors> {
let start = start_timer!(|| "sum check interpolate uni poly opt");
let len = p_i.len();
let mut evals = vec![];
let mut prod = eval_at;
evals.push(eval_at);
// `prod = \prod_{j} (eval_at - j)`
for e in 1..len {
let tmp = eval_at - F::from(e as u64);
evals.push(tmp);
prod *= tmp;
}
let mut res = F::zero();
// we want to compute \prod (j!=i) (i-j) for a given i
//
// we start from the last step, which is
// denom[len-1] = (len-1) * (len-2) *... * 2 * 1
// the step before that is
// denom[len-2] = (len-2) * (len-3) * ... * 2 * 1 * -1
// and the step before that is
// denom[len-3] = (len-3) * (len-4) * ... * 2 * 1 * -1 * -2
//
// i.e., for any i, the one before this will be derived from
// denom[i-1] = denom[i] * (len-i) / i
//
// that is, we only need to store
// - the last denom for i = len-1, and
// - the ratio between the current step and the last step, which is the product of
// (len-i) / i from all previous steps and we store this product as a fraction
// number to reduce field divisions.
// We know
// - 2^61 < factorial(20) < 2^62
// - 2^122 < factorial(33) < 2^123
// so we will be able to compute the ratio
// - for len <= 20 with i64
// - for len <= 33 with i128
// - for len > 33 with BigInt
if p_i.len() <= 20 {
let last_denominator = F::from(u64_factorial(len - 1));
let mut ratio_numerator = 1i64;
let mut ratio_denominator = 1u64;
for i in (0..len).rev() {
let ratio_numerator_f = if ratio_numerator < 0 {
-F::from((-ratio_numerator) as u64)
} else {
F::from(ratio_numerator as u64)
};
res += p_i[i] * prod * F::from(ratio_denominator)
/ (last_denominator * ratio_numerator_f * evals[i]);
// the denominator for the next step is current_denom * -(len-i) / i
if i != 0 {
ratio_numerator *= -(len as i64 - i as i64);
ratio_denominator *= i as u64;
}
}
} else if p_i.len() <= 33 {
let last_denominator = F::from(u128_factorial(len - 1));
let mut ratio_numerator = 1i128;
let mut ratio_denominator = 1u128;
for i in (0..len).rev() {
let ratio_numerator_f = if ratio_numerator < 0 {
-F::from((-ratio_numerator) as u128)
} else {
F::from(ratio_numerator as u128)
};
res += p_i[i] * prod * F::from(ratio_denominator)
/ (last_denominator * ratio_numerator_f * evals[i]);
// the denominator for the next step is current_denom * -(len-i) / i
if i != 0 {
ratio_numerator *= -(len as i128 - i as i128);
ratio_denominator *= i as u128;
}
}
} else {
let mut denom_up = field_factorial::<F>(len - 1);
let mut denom_down = F::one();
for i in (0..len).rev() {
res += p_i[i] * prod * denom_down / (denom_up * evals[i]);
// the denominator for the next step is current_denom * -(len-i) / i
if i != 0 {
denom_up *= -F::from((len - i) as u64);
denom_down *= F::from(i as u64);
}
}
}
end_timer!(start);
Ok(res)
}
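// Illustrative usage (a sketch; `Fr` stands for any PrimeField implementor):
// interpolating p(x) = x^2 from its evaluations [p(0), p(1), p(2)] = [0, 1, 4]
// and querying at 3 recovers p(3) = 9:
//
// let evals = vec![Fr::from(0u64), Fr::from(1u64), Fr::from(4u64)];
// assert_eq!(interpolate_uni_poly(&evals, Fr::from(3u64))?, Fr::from(9u64));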
/// compute the factorial(a) = 1 * 2 * ... * a
#[inline]
fn field_factorial<F: PrimeField>(a: usize) -> F {
let mut res = F::one();
for i in 2..=a {
res *= F::from(i as u64);
}
res
}
/// compute the factorial(a) = 1 * 2 * ... * a
#[inline]
fn u128_factorial(a: usize) -> u128 {
let mut res = 1u128;
for i in 2..=a {
res *= i as u128;
}
res
}
/// compute the factorial(a) = 1 * 2 * ... * a
#[inline]
fn u64_factorial(a: usize) -> u64 {
let mut res = 1u64;
for i in 2..=a {
res *= i as u64;
}
res
}

View File

@@ -0,0 +1,550 @@
// code forked from
// https://github.com/privacy-scaling-explorations/multifolding-poc/blob/main/src/espresso/virtual_polynomial.rs
//
// Copyright (c) 2023 Espresso Systems (espressosys.com)
// This file is part of the HyperPlonk library.
// You should have received a copy of the MIT License
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
//! This module defines our main mathematical object `VirtualPolynomial`; and
//! various functions associated with it.
use ark_ff::PrimeField;
use ark_poly::{DenseMultilinearExtension, MultilinearExtension};
use ark_serialize::CanonicalSerialize;
use ark_std::{end_timer, start_timer};
use rayon::prelude::*;
use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc};
use thiserror::Error;
use ark_std::string::String;
//-- aritherrors
/// An `enum` specifying the possible failure modes of the arithmetics.
#[derive(Error, Debug)]
pub enum ArithErrors {
#[error("Invalid parameters: {0}")]
InvalidParameters(String),
#[error("Should not arrive to this point")]
ShouldNotArrive,
#[error("An error during (de)serialization: {0}")]
SerializationErrors(ark_serialize::SerializationError),
}
impl From<ark_serialize::SerializationError> for ArithErrors {
fn from(e: ark_serialize::SerializationError) -> Self {
Self::SerializationErrors(e)
}
}
//-- aritherrors
#[rustfmt::skip]
/// A virtual polynomial is a sum of products of multilinear polynomials;
/// where the multilinear polynomials are stored via their multilinear
/// extensions: `(coefficient, DenseMultilinearExtension)`
///
/// * Number of products n = `polynomial.products.len()`,
/// * Number of multiplicands of ith product m_i =
/// `polynomial.products[i].1.len()`,
/// * Coefficient of ith product c_i = `polynomial.products[i].0`
///
/// The resulting polynomial is
///
/// $$ \sum_{i=0}^{n-1} c_i \cdot \prod_{j=0}^{m_i - 1} P_{ij} $$
///
/// Example:
/// f = c0 * f0 * f1 * f2 + c1 * f3 * f4
/// where f0 ... f4 are multilinear polynomials
///
/// - flattened_ml_extensions stores the multilinear extension representation of
/// f0, f1, f2, f3 and f4
/// - products is
/// \[
/// (c0, \[0, 1, 2\]),
/// (c1, \[3, 4\])
/// \]
/// - raw_pointers_lookup_table maps fi to i
///
#[derive(Clone, Debug, Default, PartialEq)]
pub struct VirtualPolynomial<F: PrimeField> {
/// Aux information about the multilinear polynomial
pub aux_info: VPAuxInfo<F>,
/// list of products, each a coefficient together with the indices (into
/// `flattened_ml_extensions`) of its multilinear-extension multiplicands
pub products: Vec<(F, Vec<usize>)>,
/// Stores the multilinear extensions that the product multiplicands refer
/// to.
pub flattened_ml_extensions: Vec<Arc<DenseMultilinearExtension<F>>>,
/// Pointers to the above poly extensions
raw_pointers_lookup_table: HashMap<*const DenseMultilinearExtension<F>, usize>,
}
#[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)]
/// Auxiliary information about the multilinear polynomial
pub struct VPAuxInfo<F: PrimeField> {
/// max number of multiplicands in each product
pub max_degree: usize,
/// number of variables of the polynomial
pub num_variables: usize,
/// Associated field
#[doc(hidden)]
pub phantom: PhantomData<F>,
}
impl<F: PrimeField> Add for &VirtualPolynomial<F> {
type Output = VirtualPolynomial<F>;
fn add(self, other: &VirtualPolynomial<F>) -> Self::Output {
let start = start_timer!(|| "virtual poly add");
let mut res = self.clone();
for products in other.products.iter() {
let cur: Vec<Arc<DenseMultilinearExtension<F>>> = products
.1
.iter()
.map(|&x| other.flattened_ml_extensions[x].clone())
.collect();
res.add_mle_list(cur, products.0)
.expect("add product failed");
}
end_timer!(start);
res
}
}
// TODO: convert this into a trait
impl<F: PrimeField> VirtualPolynomial<F> {
/// Creates an empty virtual polynomial with `num_variables`.
pub fn new(num_variables: usize) -> Self {
VirtualPolynomial {
aux_info: VPAuxInfo {
max_degree: 0,
num_variables,
phantom: PhantomData,
},
products: Vec::new(),
flattened_ml_extensions: Vec::new(),
raw_pointers_lookup_table: HashMap::new(),
}
}
/// Creates a new virtual polynomial from a MLE and its coefficient.
pub fn new_from_mle(mle: &Arc<DenseMultilinearExtension<F>>, coefficient: F) -> Self {
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(mle);
let mut hm = HashMap::new();
hm.insert(mle_ptr, 0);
VirtualPolynomial {
aux_info: VPAuxInfo {
// The max degree is the max degree of any individual variable
max_degree: 1,
num_variables: mle.num_vars,
phantom: PhantomData,
},
// here `0` points to the first polynomial of `flattened_ml_extensions`
products: vec![(coefficient, vec![0])],
flattened_ml_extensions: vec![mle.clone()],
raw_pointers_lookup_table: hm,
}
}
/// Add a product of a list of multilinear extensions to self.
/// Returns an error if the list is empty, or the MLE has a different
/// `num_vars` from self.
///
/// The MLEs will be multiplied together, and then multiplied by the scalar
/// `coefficient`.
pub fn add_mle_list(
&mut self,
mle_list: impl IntoIterator<Item = Arc<DenseMultilinearExtension<F>>>,
coefficient: F,
) -> Result<(), ArithErrors> {
let mle_list: Vec<Arc<DenseMultilinearExtension<F>>> = mle_list.into_iter().collect();
let mut indexed_product = Vec::with_capacity(mle_list.len());
if mle_list.is_empty() {
return Err(ArithErrors::InvalidParameters(
"input mle_list is empty".to_string(),
));
}
self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len());
for mle in mle_list {
if mle.num_vars != self.aux_info.num_variables {
return Err(ArithErrors::InvalidParameters(format!(
"product has a multiplicand with wrong number of variables {} vs {}",
mle.num_vars, self.aux_info.num_variables
)));
}
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(&mle);
if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) {
indexed_product.push(*index)
} else {
let curr_index = self.flattened_ml_extensions.len();
self.flattened_ml_extensions.push(mle.clone());
self.raw_pointers_lookup_table.insert(mle_ptr, curr_index);
indexed_product.push(curr_index);
}
}
self.products.push((coefficient, indexed_product));
Ok(())
}
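// Illustrative usage (a sketch; assumes MLEs `f0`, `f1`, `f2` wrapped in `Arc`
// and sharing the same `num_vars`, and coefficients `c0`, `c1`):
//
// let mut vp = VirtualPolynomial::new(nv);
// vp.add_mle_list(vec![f0.clone(), f1.clone()], c0)?; // vp = c0 * f0 * f1
// vp.mul_by_mle(f2, c1)?; // vp = c0 * c1 * f0 * f1 * f2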
/// Multiply the current VirtualPolynomial by an MLE:
/// - add the MLE to the MLE list;
/// - multiply each product by the MLE and its coefficient.
/// Returns an error if the MLE has a different `num_vars` from self.
pub fn mul_by_mle(
&mut self,
mle: Arc<DenseMultilinearExtension<F>>,
coefficient: F,
) -> Result<(), ArithErrors> {
let start = start_timer!(|| "mul by mle");
if mle.num_vars != self.aux_info.num_variables {
return Err(ArithErrors::InvalidParameters(format!(
"product has a multiplicand with wrong number of variables {} vs {}",
mle.num_vars, self.aux_info.num_variables
)));
}
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(&mle);
// check if this mle already exists in the virtual polynomial
let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) {
Some(&p) => p,
None => {
self.raw_pointers_lookup_table
.insert(mle_ptr, self.flattened_ml_extensions.len());
self.flattened_ml_extensions.push(mle);
self.flattened_ml_extensions.len() - 1
}
};
for (prod_coef, indices) in self.products.iter_mut() {
// - add the MLE to the MLE list;
// - multiply each product by the MLE and its coefficient.
indices.push(mle_index);
*prod_coef *= coefficient;
}
// increase the max degree by one as the MLE has degree 1.
self.aux_info.max_degree += 1;
end_timer!(start);
Ok(())
}
/// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)`
pub fn scalar_mul(&mut self, s: &F) {
for (prod_coef, _) in self.products.iter_mut() {
*prod_coef *= s;
}
}
/// Evaluate the virtual polynomial at point `point`.
/// Returns an error if `point.len()` does not match `num_variables`.
pub fn evaluate(&self, point: &[F]) -> Result<F, ArithErrors> {
let start = start_timer!(|| "evaluation");
if self.aux_info.num_variables != point.len() {
return Err(ArithErrors::InvalidParameters(format!(
"wrong number of variables {} vs {}",
self.aux_info.num_variables,
point.len()
)));
}
// Evaluate all the MLEs at `point`
let evals: Vec<F> = self
.flattened_ml_extensions
.iter()
.map(|x| {
x.evaluate(point).unwrap() // safe unwrap here since we have
// already checked that num_var
// matches
})
.collect();
let res = self
.products
.iter()
.map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::<F>())
.sum();
end_timer!(start);
Ok(res)
}
// Input poly f(x) and a random vector r, output
// \hat f(x) = f(x) * eq(x, r)
// where
// eq(x, r) = \prod_{i=1}^{num_var} (x_i * r_i + (1-x_i)*(1-r_i))
//
// This function is used in ZeroCheck: f vanishes over the boolean hypercube iff
// \sum_{x \in {0,1}^{num_var}} \hat f(x) = 0, with high probability over r.
pub fn build_f_hat(&self, r: &[F]) -> Result<Self, ArithErrors> {
let start = start_timer!(|| "zero check build hat f");
if self.aux_info.num_variables != r.len() {
return Err(ArithErrors::InvalidParameters(format!(
"r.len() is different from number of variables: {} vs {}",
r.len(),
self.aux_info.num_variables
)));
}
let eq_x_r = build_eq_x_r(r)?;
let mut res = self.clone();
res.mul_by_mle(eq_x_r, F::one())?;
end_timer!(start);
Ok(res)
}
}
/// Evaluate eq polynomial.
pub fn eq_eval<F: PrimeField>(x: &[F], y: &[F]) -> Result<F, ArithErrors> {
if x.len() != y.len() {
return Err(ArithErrors::InvalidParameters(
"x and y have different length".to_string(),
));
}
let start = start_timer!(|| "eq_eval");
let mut res = F::one();
for (&xi, &yi) in x.iter().zip(y.iter()) {
let xi_yi = xi * yi;
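// x_i * y_i + (1 - x_i) * (1 - y_i) expands to 2 * x_i * y_i - x_i - y_i + 1,
// which is what is accumulated below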
res *= xi_yi + xi_yi - xi - yi + F::one();
}
end_timer!(start);
Ok(res)
}
/// This function builds the eq(x, r) polynomial for any given r.
///
/// Evaluate
/// eq(x, y) = \prod_{i=1}^{num_var} (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
/// eq(x, r) = \prod_{i=1}^{num_var} (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r<F: PrimeField>(r: &[F]) -> Result<Arc<DenseMultilinearExtension<F>>, ArithErrors> {
let evals = build_eq_x_r_vec(r)?;
let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals);
Ok(Arc::new(mle))
}
/// This function builds the eq(x, r) polynomial for any given r, and outputs the
/// evaluation of eq(x, r) in its vector form.
///
/// Evaluate
/// eq(x, y) = \prod_{i=1}^{num_var} (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
/// eq(x, r) = \prod_{i=1}^{num_var} (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r_vec<F: PrimeField>(r: &[F]) -> Result<Vec<F>, ArithErrors> {
// we build eq(x,r) from its evaluations
// we want to evaluate eq(x,r) over x \in {0, 1}^num_vars
// for example, with num_vars = 4, x is a binary vector of 4, then
// 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3)
// 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3)
// 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3)
// 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3)
// ....
// 1 1 1 1 -> r0 * r1 * r2 * r3
// we will need 2^num_var evaluations
let mut eval = Vec::new();
build_eq_x_r_helper(r, &mut eval)?;
Ok(eval)
}
/// A helper function to build eq(x, r) recursively.
/// The recursion has `r.len()` levels; level `k` doubles the buffer and performs
/// one multiplication per new entry, so roughly `2^{r.len()+1}` multiplications
/// in total.
fn build_eq_x_r_helper<F: PrimeField>(r: &[F], buf: &mut Vec<F>) -> Result<(), ArithErrors> {
if r.is_empty() {
return Err(ArithErrors::InvalidParameters("r length is 0".to_string()));
} else if r.len() == 1 {
// initializing the buffer with [1-r_0, r_0]
buf.push(F::one() - r[0]);
buf.push(r[0]);
} else {
build_eq_x_r_helper(&r[1..], buf)?;
// suppose at the previous step we received [b_1, ..., b_k]
// for the current step we will need
// if x_0 = 0: (1-r0) * [b_1, ..., b_k]
// if x_0 = 1: r0 * [b_1, ..., b_k]
// let mut res = vec![];
// for &b_i in buf.iter() {
// let tmp = r[0] * b_i;
// res.push(b_i - tmp);
// res.push(tmp);
// }
// *buf = res;
let mut res = vec![F::zero(); buf.len() << 1];
res.par_iter_mut().enumerate().for_each(|(i, val)| {
let bi = buf[i >> 1];
let tmp = r[0] * bi;
if i & 1 == 0 {
*val = bi - tmp;
} else {
*val = tmp;
}
});
*buf = res;
}
Ok(())
}
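// Illustrative trace for r = [r0, r1]: the recursive call on [r1] yields
// [1-r1, r1]; the final step interleaves with r0, giving
// [(1-r0)(1-r1), r0(1-r1), (1-r0)r1, r0*r1],
// which matches the little-endian ordering of the hypercube points above.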
/// Decompose an integer into a binary vector in little endian.
pub fn bit_decompose(input: u64, num_var: usize) -> Vec<bool> {
let mut res = Vec::with_capacity(num_var);
let mut i = input;
for _ in 0..num_var {
res.push(i & 1 == 1);
i >>= 1;
}
res
}
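// e.g. bit_decompose(6, 3) == [false, true, true], since 6 = 0b110 and the
// least significant bit comes first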
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::multilinear_polynomial::tests::random_mle_list;
use ark_ff::UniformRand;
use ark_pallas::Fr;
use ark_std::{
rand::{Rng, RngCore},
test_rng,
};
impl<F: PrimeField> VirtualPolynomial<F> {
/// Sample a random virtual polynomial, return the polynomial and its sum.
fn rand<R: RngCore>(
nv: usize,
num_multiplicands_range: (usize, usize),
num_products: usize,
rng: &mut R,
) -> Result<(Self, F), ArithErrors> {
let start = start_timer!(|| "sample random virtual polynomial");
let mut sum = F::zero();
let mut poly = VirtualPolynomial::new(nv);
for _ in 0..num_products {
let num_multiplicands =
rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1);
let (product, product_sum) = random_mle_list(nv, num_multiplicands, rng);
let coefficient = F::rand(rng);
poly.add_mle_list(product.into_iter(), coefficient)?;
sum += product_sum * coefficient;
}
end_timer!(start);
Ok((poly, sum))
}
}
#[test]
fn test_virtual_polynomial_additions() -> Result<(), ArithErrors> {
let mut rng = test_rng();
for nv in 2..5 {
for num_products in 2..5 {
let base: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();
let (a, _a_sum) =
VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
let (b, _b_sum) =
VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
let c = &a + &b;
assert_eq!(
a.evaluate(base.as_ref())? + b.evaluate(base.as_ref())?,
c.evaluate(base.as_ref())?
);
}
}
Ok(())
}
#[test]
fn test_virtual_polynomial_mul_by_mle() -> Result<(), ArithErrors> {
let mut rng = test_rng();
for nv in 2..5 {
for num_products in 2..5 {
let base: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();
let (a, _a_sum) =
VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
let (b, _b_sum) = random_mle_list(nv, 1, &mut rng);
let b_mle = b[0].clone();
let coeff = Fr::rand(&mut rng);
let b_vp = VirtualPolynomial::new_from_mle(&b_mle, coeff);
let mut c = a.clone();
c.mul_by_mle(b_mle, coeff)?;
assert_eq!(
a.evaluate(base.as_ref())? * b_vp.evaluate(base.as_ref())?,
c.evaluate(base.as_ref())?
);
}
}
Ok(())
}
#[test]
fn test_eq_xr() {
let mut rng = test_rng();
for nv in 4..10 {
let r: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();
let eq_x_r = build_eq_x_r(r.as_ref()).unwrap();
let eq_x_r2 = build_eq_x_r_for_test(r.as_ref());
assert_eq!(eq_x_r, eq_x_r2);
}
}
/// Naive method to build eq(x, r).
/// Only used for testing purposes.
// Evaluate
// eq(x, y) = \prod_{i=1}^{num_var} (x_i * y_i + (1-x_i)*(1-y_i))
// over r, which is
// eq(x, r) = \prod_{i=1}^{num_var} (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r_for_test<F: PrimeField>(r: &[F]) -> Arc<DenseMultilinearExtension<F>> {
// we build eq(x,r) from its evaluations
// we want to evaluate eq(x,r) over x \in {0, 1}^num_vars
// for example, with num_vars = 4, x is a binary vector of 4, then
// 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3)
// 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3)
// 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3)
// 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3)
// ....
// 1 1 1 1 -> r0 * r1 * r2 * r3
// we will need 2^num_var evaluations
// First, we build array for {1 - r_i}
let one_minus_r: Vec<F> = r.iter().map(|ri| F::one() - ri).collect();
let num_var = r.len();
let mut eval = vec![];
for i in 0..1 << num_var {
let mut current_eval = F::one();
let bit_sequence = bit_decompose(i, num_var);
for (&bit, (ri, one_minus_ri)) in
bit_sequence.iter().zip(r.iter().zip(one_minus_r.iter()))
{
current_eval *= if bit { *ri } else { *one_minus_ri };
}
eval.push(current_eval);
}
let mle = DenseMultilinearExtension::from_evaluations_vec(num_var, eval);
Arc::new(mle)
}
}

View File

@@ -0,0 +1,105 @@
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::FieldVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};
use crate::utils::vec::SparseMatrix;
pub fn mat_vec_mul_sparse<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
m: SparseMatrixVar<F, CF, FV>,
v: Vec<FV>,
) -> Vec<FV> {
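// note: unlike the native `mat_vec_mul_sparse` in `utils::vec`, no dimension
// check is performed here; the indexing below panics if a column index of `m`
// exceeds `v.len()`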
let mut res = vec![FV::zero(); m.n_rows];
for (row_i, row) in m.coeffs.iter().enumerate() {
for (value, col_i) in row.iter() {
res[row_i] += value.clone().mul(&v[*col_i].clone());
}
}
res
}
pub fn vec_add<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
a: &Vec<FV>,
b: &Vec<FV>,
) -> Result<Vec<FV>, SynthesisError> {
if a.len() != b.len() {
return Err(SynthesisError::Unsatisfiable);
}
let mut r: Vec<FV> = vec![FV::zero(); a.len()];
for i in 0..a.len() {
r[i] = a[i].clone() + b[i].clone();
}
Ok(r)
}
pub fn vec_scalar_mul<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
vec: &Vec<FV>,
c: &FV,
) -> Vec<FV> {
let mut result = vec![FV::zero(); vec.len()];
for (i, a) in vec.iter().enumerate() {
result[i] = a.clone() * c;
}
result
}
pub fn hadamard<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
a: &Vec<FV>,
b: &Vec<FV>,
) -> Result<Vec<FV>, SynthesisError> {
if a.len() != b.len() {
return Err(SynthesisError::Unsatisfiable);
}
let mut r: Vec<FV> = vec![FV::zero(); a.len()];
for i in 0..a.len() {
r[i] = a[i].clone() * b[i].clone();
}
Ok(r)
}
#[derive(Debug, Clone)]
pub struct SparseMatrixVar<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
_f: PhantomData<F>,
_cf: PhantomData<CF>,
_fv: PhantomData<FV>,
pub n_rows: usize,
pub n_cols: usize,
// same format as the native SparseMatrix (which follows the ark_relations::r1cs::Matrix format)
pub coeffs: Vec<Vec<(FV, usize)>>,
}
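// Illustrative allocation (a sketch; assumes a ConstraintSystemRef `cs` over `Fr`
// and a native matrix `m: SparseMatrix<Fr>`), via the `AllocVar` impl below:
//
// let m_var = SparseMatrixVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(m))?;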
impl<F, CF, FV> AllocVar<SparseMatrix<F>, CF> for SparseMatrixVar<F, CF, FV>
where
F: PrimeField,
CF: PrimeField,
FV: FieldVar<F, CF>,
{
fn new_variable<T: Borrow<SparseMatrix<F>>>(
cs: impl Into<Namespace<CF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let mut coeffs: Vec<Vec<(FV, usize)>> = Vec::new();
for row in val.borrow().coeffs.iter() {
let mut rowVar: Vec<(FV, usize)> = Vec::new();
for &(value, col_i) in row.iter() {
let coeffVar = FV::new_variable(cs.clone(), || Ok(value), mode)?;
rowVar.push((coeffVar, col_i));
}
coeffs.push(rowVar);
}
Ok(Self {
_f: PhantomData,
_cf: PhantomData,
_fv: PhantomData,
n_rows: val.borrow().n_rows,
n_cols: val.borrow().n_cols,
coeffs,
})
})
}
}

View File

@@ -0,0 +1,77 @@
//! A boolean hypercube structure to create an ergonomic evaluation domain
use crate::utils::virtual_polynomial::bit_decompose;
use ark_ff::PrimeField;
use std::marker::PhantomData;
/// A boolean hypercube that returns its points as an iterator
/// If you iterate on it for 3 variables you will get points in little-endian order:
/// 000 -> 100 -> 010 -> 110 -> 001 -> 101 -> 011 -> 111
#[derive(Debug, Clone)]
pub struct BooleanHypercube<F: PrimeField> {
_f: PhantomData<F>,
n_vars: usize,
current: u64,
max: u64,
}
impl<F: PrimeField> BooleanHypercube<F> {
pub fn new(n_vars: usize) -> Self {
BooleanHypercube::<F> {
_f: PhantomData::<F>,
n_vars,
current: 0,
max: 1u64 << n_vars,
}
}
/// Returns the point at index `i`, i.e. the little-endian bit decomposition of `i`
pub fn at_i(&self, i: usize) -> Vec<F> {
assert!(i < self.max as usize);
let bits = bit_decompose(i as u64, self.n_vars);
bits.iter().map(|&x| F::from(x)).collect()
}
}
impl<F: PrimeField> Iterator for BooleanHypercube<F> {
type Item = Vec<F>;
fn next(&mut self) -> Option<Self::Item> {
let bits = bit_decompose(self.current, self.n_vars);
let result: Vec<F> = bits.iter().map(|&x| F::from(x)).collect();
self.current += 1;
if self.current > self.max {
return None;
}
Some(result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::vec::tests::to_F_dense_matrix;
use ark_pallas::Fr;
#[test]
fn test_hypercube() {
let expected_results = to_F_dense_matrix(vec![
vec![0, 0, 0],
vec![1, 0, 0],
vec![0, 1, 0],
vec![1, 1, 0],
vec![0, 0, 1],
vec![1, 0, 1],
vec![0, 1, 1],
vec![1, 1, 1],
]);
let bhc = BooleanHypercube::<Fr>::new(3);
for (i, point) in bhc.clone().enumerate() {
assert_eq!(point, expected_results[i]);
assert_eq!(point, bhc.at_i(i));
}
}
}

View File

@@ -0,0 +1,123 @@
use ark_ff::PrimeField;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
/// Computes the Lagrange interpolated polynomial from the given points `p_i`
pub fn compute_lagrange_interpolated_poly<F: PrimeField>(p_i: &[F]) -> DensePolynomial<F> {
// domain is 0..p_i.len(), to fit `interpolate_uni_poly` from hyperplonk
let domain: Vec<usize> = (0..p_i.len()).collect();
// compute l(x), common to every basis polynomial
let mut l_x = DensePolynomial::from_coefficients_vec(vec![F::ONE]);
for x_m in domain.clone() {
let prod_m = DensePolynomial::from_coefficients_vec(vec![-F::from(x_m as u64), F::ONE]);
l_x = &l_x * &prod_m;
}
// compute each w_j - barycentric weights
let mut w_j_vector: Vec<F> = vec![];
for x_j in domain.clone() {
let mut w_j = F::ONE;
for x_m in domain.clone() {
if x_m != x_j {
let prod = (F::from(x_j as u64) - F::from(x_m as u64))
.inverse()
.unwrap(); // the inverse always exists since x_j != x_m implies x_j - x_m != 0;
// hence we can call unwrap() without checking the Option's content
w_j *= prod;
}
}
w_j_vector.push(w_j);
}
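// What follows assembles the first barycentric form (for reference):
// L(x) = l(x) * \sum_j w_j * y_j / (x - x_j)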
// compute each polynomial within the sum L(x)
let mut lagrange_poly = DensePolynomial::from_coefficients_vec(vec![F::ZERO]);
for (j, w_j) in w_j_vector.iter().enumerate() {
let x_j = domain[j];
let y_j = p_i[j];
// we multiply by l(x) here, otherwise the below division will not work - deg(0)/deg(d)
let poly_numerator = &(&l_x * (*w_j)) * (y_j);
let poly_denominator =
DensePolynomial::from_coefficients_vec(vec![-F::from(x_j as u64), F::ONE]);
let poly = &poly_numerator / &poly_denominator;
lagrange_poly = &lagrange_poly + &poly;
}
lagrange_poly
}
#[cfg(test)]
mod tests {
use crate::utils::espresso::sum_check::verifier::interpolate_uni_poly;
use crate::utils::lagrange_poly::compute_lagrange_interpolated_poly;
use ark_pallas::Fr;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_std::{vec::Vec, UniformRand};
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
#[test]
fn test_compute_lagrange_interpolated_poly() {
let mut prng = ark_std::test_rng();
for degree in 1..30 {
let poly = DensePolynomial::<Fr>::rand(degree, &mut prng);
// range (which is exclusive) is from 0 to degree + 1, since we need degree + 1 evaluations
let evals = (0..(degree + 1))
.map(|i| poly.evaluate(&Fr::from(i as u64)))
.collect::<Vec<Fr>>();
let lagrange_poly = compute_lagrange_interpolated_poly(&evals);
for _ in 0..10 {
let query = Fr::rand(&mut prng);
let lagrange_eval = lagrange_poly.evaluate(&query);
let eval = poly.evaluate(&query);
assert_eq!(eval, lagrange_eval);
assert_eq!(lagrange_poly.degree(), poly.degree());
}
}
}
#[test]
fn test_interpolation() -> Result<(), PolyIOPErrors> {
let mut prng = ark_std::test_rng();
// test a polynomial with 20 known points, i.e., with degree 19
let poly = DensePolynomial::<Fr>::rand(20 - 1, &mut prng);
let evals = (0..20)
.map(|i| poly.evaluate(&Fr::from(i)))
.collect::<Vec<Fr>>();
let query = Fr::rand(&mut prng);
assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
assert_eq!(
compute_lagrange_interpolated_poly(&evals).evaluate(&query),
interpolate_uni_poly(&evals, query)?
);
// test a polynomial with 33 known points, i.e., with degree 32
let poly = DensePolynomial::<Fr>::rand(33 - 1, &mut prng);
let evals = (0..33)
.map(|i| poly.evaluate(&Fr::from(i)))
.collect::<Vec<Fr>>();
let query = Fr::rand(&mut prng);
assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
assert_eq!(
compute_lagrange_interpolated_poly(&evals).evaluate(&query),
interpolate_uni_poly(&evals, query)?
);
// test a polynomial with 64 known points, i.e., with degree 63
let poly = DensePolynomial::<Fr>::rand(64 - 1, &mut prng);
let evals = (0..64)
.map(|i| poly.evaluate(&Fr::from(i)))
.collect::<Vec<Fr>>();
let query = Fr::rand(&mut prng);
assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
assert_eq!(
compute_lagrange_interpolated_poly(&evals).evaluate(&query),
interpolate_uni_poly(&evals, query)?
);
Ok(())
}
}

View File

@@ -0,0 +1,167 @@
//! Some basic MLE utilities
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_std::log2;
use super::vec::SparseMatrix;
/// Pad matrix so that its columns and rows are powers of two
pub fn pad_matrix<F: PrimeField>(m: &SparseMatrix<F>) -> SparseMatrix<F> {
let mut r = m.clone();
r.n_rows = m.n_rows.next_power_of_two();
r.n_cols = m.n_cols.next_power_of_two();
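// no coefficients need to change: entries absent from the sparse
// representation are implicitly zero, so only the declared dimensions grow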
r
}
/// Returns the dense multilinear extension from the given matrix, without modifying the original
/// matrix.
pub fn matrix_to_mle<F: PrimeField>(matrix: SparseMatrix<F>) -> DenseMultilinearExtension<F> {
let n_vars: usize = (log2(matrix.n_rows) + log2(matrix.n_cols)) as usize; // n_vars = s + s'
// Matrices might need to get padded before turned into an MLE
let padded_matrix = pad_matrix(&matrix);
// build dense vector representing the sparse padded matrix
let mut v: Vec<F> = vec![F::zero(); padded_matrix.n_rows * padded_matrix.n_cols];
for (row_i, row) in padded_matrix.coeffs.iter().enumerate() {
for &(value, col_i) in row.iter() {
v[(padded_matrix.n_cols * row_i) + col_i] = value;
}
}
// convert the dense vector into a mle
vec_to_mle(n_vars, &v)
}
/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
let v_padded: Vec<F> = if v.len() != (1 << n_vars) {
// pad to 2^n_vars
[
v.clone(),
std::iter::repeat(F::zero())
.take((1 << n_vars) - v.len())
.collect(),
]
.concat()
} else {
v.clone()
};
DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
/// Takes n_vars and a dense vector and returns its dense MLE, padding the vector
/// with zeros up to length 2^n_vars (expects `v.len() <= 2^n_vars`).
pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
// Pad to 2^n_vars
let v_padded: Vec<F> = [
v.clone(),
std::iter::repeat(F::zero())
.take((1 << n_vars) - v.len())
.collect(),
]
.concat();
DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
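// e.g. with n_vars = 2, the vector [a, b, c] becomes the MLE with evaluations
// [a, b, c, 0] over the boolean hypercube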
#[cfg(test)]
mod tests {
use super::*;
use crate::{
ccs::tests::get_test_z,
utils::multilinear_polynomial::fix_variables,
utils::multilinear_polynomial::tests::fix_last_variables,
utils::{hypercube::BooleanHypercube, vec::tests::to_F_matrix},
};
use ark_poly::MultilinearExtension;
use ark_std::Zero;
use ark_pallas::Fr;
#[test]
fn test_matrix_to_mle() {
let A = to_F_matrix::<Fr>(vec![
vec![2, 3, 4, 4],
vec![4, 11, 14, 14],
vec![2, 8, 17, 17],
vec![420, 4, 2, 0],
]);
let A_mle = matrix_to_mle(A);
assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals
let A = to_F_matrix::<Fr>(vec![
vec![2, 3, 4, 4, 1],
vec![4, 11, 14, 14, 2],
vec![2, 8, 17, 17, 3],
vec![420, 4, 2, 0, 4],
vec![420, 4, 2, 0, 5],
]);
let A_mle = matrix_to_mle(A.clone());
assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals
// check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values
let bhc = BooleanHypercube::new(A_mle.num_vars);
let A_padded = pad_matrix(&A);
let A_padded_dense = A_padded.to_dense();
for (i, A_row) in A_padded_dense.iter().enumerate() {
for (j, _) in A_row.iter().enumerate() {
let s_i_j = bhc.at_i(i * A_row.len() + j);
assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded_dense[i][j]);
}
}
}
#[test]
fn test_vec_to_mle() {
let z = get_test_z::<Fr>(3);
let n_vars = 3;
let z_mle = dense_vec_to_mle(n_vars, &z);
// check that the z_mle evaluated over the boolean hypercube equals the vec z_i values
let bhc = BooleanHypercube::new(z_mle.num_vars);
for (i, z_i) in z.iter().enumerate() {
let s_i = bhc.at_i(i);
assert_eq!(z_mle.evaluate(&s_i).unwrap(), z_i.clone());
}
// for the rest of elements of the boolean hypercube, expect it to evaluate to zero
for i in (z.len())..(1 << z_mle.num_vars) {
let s_i = bhc.at_i(i);
assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero());
}
}
#[test]
fn test_fix_variables() {
let A = to_F_matrix(vec![
vec![2, 3, 4, 4],
vec![4, 11, 14, 14],
vec![2, 8, 17, 17],
vec![420, 4, 2, 0],
]);
let A_mle = matrix_to_mle(A.clone());
let A = A.to_dense();
let bhc = BooleanHypercube::new(2);
for (i, y) in bhc.enumerate() {
// First check that the arkworks and espresso funcs match
let expected_fix_left = A_mle.fix_variables(&y); // try arkworks fix_variables
let fix_left = fix_variables(&A_mle, &y); // try espresso fix_variables
assert_eq!(fix_left, expected_fix_left);
// Check that fixing first variables pins down a column
// i.e. fixing x to 0 will return the first column
// fixing x to 1 will return the second column etc.
let column_i: Vec<Fr> = A.clone().iter().map(|x| x[i]).collect();
assert_eq!(fix_left.evaluations, column_i);
// Now check that fixing last variables pins down a row
// i.e. fixing y to 0 will return the first row
// fixing y to 1 will return the second row etc.
let row_i: Vec<Fr> = A[i].clone();
let fix_right = fix_last_variables(&A_mle, &y);
assert_eq!(fix_right.evaluations, row_i);
}
}
}

View File

@@ -0,0 +1,12 @@
pub mod bit;
pub mod gadgets;
pub mod hypercube;
pub mod lagrange_poly;
pub mod mle;
pub mod vec;
// expose espresso local modules
pub mod espresso;
pub use crate::utils::espresso::multilinear_polynomial;
pub use crate::utils::espresso::sum_check;
pub use crate::utils::espresso::virtual_polynomial;

View File

@@ -0,0 +1,209 @@
use ark_ff::PrimeField;
pub use ark_relations::r1cs::Matrix as R1CSMatrix;
use ark_std::cfg_iter;
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
use crate::Error;
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SparseMatrix<F: PrimeField> {
pub n_rows: usize,
pub n_cols: usize,
/// coeffs = R1CSMatrix = Vec<Vec<(F, usize)>>: one inner Vec per row, where each
/// (F, usize) pair holds a coefficient value and its column index
pub coeffs: R1CSMatrix<F>,
}
impl<F: PrimeField> SparseMatrix<F> {
pub fn to_dense(&self) -> Vec<Vec<F>> {
let mut r: Vec<Vec<F>> = vec![vec![F::zero(); self.n_cols]; self.n_rows];
for (row_i, row) in self.coeffs.iter().enumerate() {
for &(value, col_i) in row.iter() {
r[row_i][col_i] = value;
}
}
r
}
}
pub fn dense_matrix_to_sparse<F: PrimeField>(m: Vec<Vec<F>>) -> SparseMatrix<F> {
let mut r = SparseMatrix::<F> {
n_rows: m.len(),
n_cols: m[0].len(),
coeffs: Vec::new(),
};
for m_row in m.iter() {
let mut row: Vec<(F, usize)> = Vec::new();
for (col_i, value) in m_row.iter().enumerate() {
if !value.is_zero() {
row.push((*value, col_i));
}
}
r.coeffs.push(row);
}
r
}
pub fn vec_add<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
if a.len() != b.len() {
return Err(Error::NotSameLength(
"a.len()".to_string(),
a.len(),
"b.len()".to_string(),
b.len(),
));
}
Ok(a.iter().zip(b.iter()).map(|(x, y)| *x + y).collect())
}
pub fn vec_sub<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
if a.len() != b.len() {
return Err(Error::NotSameLength(
"a.len()".to_string(),
a.len(),
"b.len()".to_string(),
b.len(),
));
}
Ok(a.iter().zip(b.iter()).map(|(x, y)| *x - y).collect())
}
pub fn vec_scalar_mul<F: PrimeField>(vec: &[F], c: &F) -> Vec<F> {
vec.iter().map(|a| *a * c).collect()
}
pub fn is_zero_vec<F: PrimeField>(vec: &[F]) -> bool {
vec.iter().all(|a| a.is_zero())
}
pub fn mat_vec_mul<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Result<Vec<F>, Error> {
if M.is_empty() {
return Err(Error::Empty);
}
if M[0].len() != z.len() {
return Err(Error::NotSameLength(
"M[0].len()".to_string(),
M[0].len(),
"z.len()".to_string(),
z.len(),
));
}
let mut r: Vec<F> = vec![F::zero(); M.len()];
for (i, M_i) in M.iter().enumerate() {
for (j, M_ij) in M_i.iter().enumerate() {
r[i] += *M_ij * z[j];
}
}
Ok(r)
}
pub fn mat_vec_mul_sparse<F: PrimeField>(M: &SparseMatrix<F>, z: &[F]) -> Result<Vec<F>, Error> {
if M.n_cols != z.len() {
return Err(Error::NotSameLength(
"M.n_cols".to_string(),
M.n_cols,
"z.len()".to_string(),
z.len(),
));
}
let mut res = vec![F::zero(); M.n_rows];
for (row_i, row) in M.coeffs.iter().enumerate() {
for &(value, col_i) in row.iter() {
res[row_i] += value * z[col_i];
}
}
Ok(res)
}
pub fn hadamard<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
if a.len() != b.len() {
return Err(Error::NotSameLength(
"a.len()".to_string(),
a.len(),
"b.len()".to_string(),
b.len(),
));
}
Ok(cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect())
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::Fr;
pub fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> SparseMatrix<F> {
dense_matrix_to_sparse(to_F_dense_matrix(M))
}
pub fn to_F_dense_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {
M.iter()
.map(|m| m.iter().map(|r| F::from(*r as u64)).collect())
.collect()
}
pub fn to_F_vec<F: PrimeField>(z: Vec<usize>) -> Vec<F> {
z.iter().map(|c| F::from(*c as u64)).collect()
}
#[test]
fn test_dense_sparse_conversions() {
let A = to_F_dense_matrix::<Fr>(vec![
vec![0, 1, 0, 0, 0, 0],
vec![0, 0, 0, 1, 0, 0],
vec![0, 1, 0, 0, 1, 0],
vec![5, 0, 0, 0, 0, 1],
]);
let A_sparse = dense_matrix_to_sparse(A.clone());
assert_eq!(A_sparse.to_dense(), A);
}
// test mat_vec_mul & mat_vec_mul_sparse
#[test]
fn test_mat_vec_mul() {
let A = to_F_matrix::<Fr>(vec![
vec![0, 1, 0, 0, 0, 0],
vec![0, 0, 0, 1, 0, 0],
vec![0, 1, 0, 0, 1, 0],
vec![5, 0, 0, 0, 0, 1],
])
.to_dense();
let z = to_F_vec(vec![1, 3, 35, 9, 27, 30]);
assert_eq!(mat_vec_mul(&A, &z).unwrap(), to_F_vec(vec![3, 9, 30, 35]));
assert_eq!(
mat_vec_mul_sparse(&dense_matrix_to_sparse(A), &z).unwrap(),
to_F_vec(vec![3, 9, 30, 35])
);
let A = to_F_matrix::<Fr>(vec![vec![2, 3, 4, 5], vec![4, 8, 12, 14], vec![9, 8, 7, 6]]);
let v = to_F_vec(vec![19, 55, 50, 3]);
assert_eq!(
mat_vec_mul(&A.to_dense(), &v).unwrap(),
to_F_vec(vec![418, 1158, 979])
);
assert_eq!(
mat_vec_mul_sparse(&A, &v).unwrap(),
to_F_vec(vec![418, 1158, 979])
);
}
#[test]
fn test_hadamard_product() {
let a = to_F_vec::<Fr>(vec![1, 2, 3, 4, 5, 6]);
let b = to_F_vec(vec![7, 8, 9, 10, 11, 12]);
assert_eq!(
hadamard(&a, &b).unwrap(),
to_F_vec(vec![7, 16, 27, 40, 55, 72])
);
}
#[test]
fn test_vec_add() {
let a: Vec<Fr> = to_F_vec::<Fr>(vec![1, 2, 3, 4, 5, 6]);
let b: Vec<Fr> = to_F_vec(vec![7, 8, 9, 10, 11, 12]);
assert_eq!(
vec_add(&a, &b).unwrap(),
to_F_vec(vec![8, 10, 12, 14, 16, 18])
);
}
}