mirror of
https://github.com/arnaucube/sonobe.git
synced 2026-01-23 20:43:52 +01:00
Add solidity groth16, kzg10 and final decider verifiers in a dedicated workspace (#70)
* change: Refactor structure into workspace * chore: Add empty readme * change: Transform repo into workspace * add: Create folding-verifier-solidity crate * add: Include askama.toml for `sol` extension escaper * add: Jordi's old Groth16 verifier .sol template and adapt it * tmp: create simple template struct to test * Update FoldingSchemes trait, fit Nova+CycleFold - update lib.rs's `FoldingScheme` trait interface - fit Nova+CycleFold into the `FoldingScheme` trait - refactor `src/nova/*` * chore: add serialization assets for testing Now we include an `assets` folder with a serialized proof & vk for tests * Add `examples` dir, with Nova's `FoldingScheme` example * polishing * expose poseidon_test_config outside tests * change: Refactor structure into workspace * chore: Add empty readme * change: Transform repo into workspace * add: Create folding-verifier-solidity crate * add: Include askama.toml for `sol` extension escaper * add: Jordi's old Groth16 verifier .sol template and adapt it * tmp: create simple template struct to test * feat: templating kzg working * chore: add emv and revm * feat: start evm file * chore: add ark-poly-commit * chore: move `commitment` to `folding-schemes` * chore: update `.gitignore` to ignore generated contracts * chore: update template with bn254 lib on it (avoids import), update for loop to account for whitespaces * refactor: update template with no lib * feat: add evm deploy code, compile and create kzg verifier * chore: update `Cargo.toml` to have `folding-schemes` available with verifiers * feat: start kzg prove and verify with sol * chore: compute crs from kzg prover * feat: evm kzg verification passing * tmp * change: Swap order of G2 coordinates within the template * Update way to serialize proof with correct order * chore: update `Cargo.toml` * chore: add revm * chore: add `save_solidity` * refactor: verifiers in dedicated mod * refactor: have dedicated `utils` module * chore: expose modules * chore: update verifier for kzg * chore: rename templates * fix: look for binary using also name of contract * refactor: generate groth16 proof for sha256 pre-image, generate groth16 template with verifying key * chore: template renaming * fix: switch circuit for circuit that simply adds * feat: generates test data on the fly * feat: update to latest groth16 verifier * refactor: rename folder, update `.gitignore` * chore: update `Cargo.toml` * chore: update templates extension to indicate that they are templates * chore: rename templates, both files and structs * fix: template inheritance working * feat: template spdx and pragma statements * feat: decider verifier compiles, update test for kzg10 and groth16 templates * feat: parameterize which size of the crs should be stored on the contract * chore: add comment on how the groth16 and kzg10 proofs will be linked together * chore: cargo clippy run * chore: cargo clippy tests * chore: cargo fmt * refactor: remove unused lifetime parameter * chore: end merge * chore: move examples to `folding-schemes` workspace * get latest main changes * fix: temp fix clippy warnings, will remove lints once not used in tests only * fix: cargo clippy lint added on `code_size` * fix: update path to test circuit and add step for installing solc * chore: remove `save_solidity` steps * fix: the borrowed expression implements the required traits * chore: update `Cargo.toml` * chore: remove extra `[patch.crates-io]` * fix: update to patch at the workspace level and add comment explaining this * refactor: correct `staticcall` with valid 
input/output sizes and change return syntax for pairing * refactor: expose modules and remove `dead_code` calls * chore: update `README.md`, add additional comments on `kzg10` template and update `groth16` template comments * chore: be clearer on attributions on `kzg10` --------- Co-authored-by: CPerezz <c.perezbaro@gmail.com> Co-authored-by: arnaucube <root@arnaucube.com>
This commit is contained in:
9
folding-schemes/src/utils/bit.rs
Normal file
9
folding-schemes/src/utils/bit.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
pub fn bit_decompose(input: u64, n: usize) -> Vec<bool> {
|
||||
let mut res = Vec::with_capacity(n);
|
||||
let mut i = input;
|
||||
for _ in 0..n {
|
||||
res.push(i & 1 == 1);
|
||||
i >>= 1;
|
||||
}
|
||||
res
|
||||
}
|
||||
3
folding-schemes/src/utils/espresso/mod.rs
Normal file
3
folding-schemes/src/utils/espresso/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod multilinear_polynomial;
|
||||
pub mod sum_check;
|
||||
pub mod virtual_polynomial;
|
||||
200
folding-schemes/src/utils/espresso/multilinear_polynomial.rs
Normal file
200
folding-schemes/src/utils/espresso/multilinear_polynomial.rs
Normal file
@@ -0,0 +1,200 @@
|
||||
// code forked from
|
||||
// https://github.com/EspressoSystems/hyperplonk/blob/main/arithmetic/src/multilinear_polynomial.rs
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
use ark_ff::Field;
|
||||
#[cfg(feature = "parallel")]
|
||||
use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
|
||||
|
||||
pub use ark_poly::DenseMultilinearExtension;
|
||||
|
||||
pub fn fix_variables<F: Field>(
|
||||
poly: &DenseMultilinearExtension<F>,
|
||||
partial_point: &[F],
|
||||
) -> DenseMultilinearExtension<F> {
|
||||
assert!(
|
||||
partial_point.len() <= poly.num_vars,
|
||||
"invalid size of partial point"
|
||||
);
|
||||
let nv = poly.num_vars;
|
||||
let mut poly = poly.evaluations.to_vec();
|
||||
let dim = partial_point.len();
|
||||
// evaluate single variable of partial point from left to right
|
||||
for (i, point) in partial_point.iter().enumerate().take(dim) {
|
||||
poly = fix_one_variable_helper(&poly, nv - i, point);
|
||||
}
|
||||
|
||||
DenseMultilinearExtension::<F>::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
|
||||
}
|
||||
|
||||
fn fix_one_variable_helper<F: Field>(data: &[F], nv: usize, point: &F) -> Vec<F> {
|
||||
let mut res = vec![F::zero(); 1 << (nv - 1)];
|
||||
|
||||
// evaluate single variable of partial point from left to right
|
||||
#[cfg(not(feature = "parallel"))]
|
||||
for i in 0..(1 << (nv - 1)) {
|
||||
res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point;
|
||||
}
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
res.par_iter_mut().enumerate().for_each(|(i, x)| {
|
||||
*x = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point;
|
||||
});
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
pub fn evaluate_no_par<F: Field>(poly: &DenseMultilinearExtension<F>, point: &[F]) -> F {
|
||||
assert_eq!(poly.num_vars, point.len());
|
||||
fix_variables_no_par(poly, point).evaluations[0]
|
||||
}
|
||||
|
||||
fn fix_variables_no_par<F: Field>(
|
||||
poly: &DenseMultilinearExtension<F>,
|
||||
partial_point: &[F],
|
||||
) -> DenseMultilinearExtension<F> {
|
||||
assert!(
|
||||
partial_point.len() <= poly.num_vars,
|
||||
"invalid size of partial point"
|
||||
);
|
||||
let nv = poly.num_vars;
|
||||
let mut poly = poly.evaluations.to_vec();
|
||||
let dim = partial_point.len();
|
||||
// evaluate single variable of partial point from left to right
|
||||
for i in 1..dim + 1 {
|
||||
let r = partial_point[i - 1];
|
||||
for b in 0..(1 << (nv - i)) {
|
||||
poly[b] = poly[b << 1] + (poly[(b << 1) + 1] - poly[b << 1]) * r;
|
||||
}
|
||||
}
|
||||
DenseMultilinearExtension::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
|
||||
}
|
||||
|
||||
/// Given multilinear polynomial `p(x)` and s `s`, compute `s*p(x)`
|
||||
pub fn scalar_mul<F: Field>(
|
||||
poly: &DenseMultilinearExtension<F>,
|
||||
s: &F,
|
||||
) -> DenseMultilinearExtension<F> {
|
||||
DenseMultilinearExtension {
|
||||
evaluations: poly.evaluations.iter().map(|e| *e * s).collect(),
|
||||
num_vars: poly.num_vars,
|
||||
}
|
||||
}
|
||||
|
||||
/// Test-only methods used in virtual_polynomial.rs
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_std::rand::RngCore;
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn fix_last_variables<F: PrimeField>(
|
||||
poly: &DenseMultilinearExtension<F>,
|
||||
partial_point: &[F],
|
||||
) -> DenseMultilinearExtension<F> {
|
||||
assert!(
|
||||
partial_point.len() <= poly.num_vars,
|
||||
"invalid size of partial point"
|
||||
);
|
||||
let nv = poly.num_vars;
|
||||
let mut poly = poly.evaluations.to_vec();
|
||||
let dim = partial_point.len();
|
||||
// evaluate single variable of partial point from left to right
|
||||
for (i, point) in partial_point.iter().rev().enumerate().take(dim) {
|
||||
poly = fix_last_variable_helper(&poly, nv - i, point);
|
||||
}
|
||||
|
||||
DenseMultilinearExtension::<F>::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
|
||||
}
|
||||
|
||||
fn fix_last_variable_helper<F: Field>(data: &[F], nv: usize, point: &F) -> Vec<F> {
|
||||
let half_len = 1 << (nv - 1);
|
||||
let mut res = vec![F::zero(); half_len];
|
||||
|
||||
// evaluate single variable of partial point from left to right
|
||||
#[cfg(not(feature = "parallel"))]
|
||||
for b in 0..half_len {
|
||||
res[b] = data[b] + (data[b + half_len] - data[b]) * point;
|
||||
}
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
res.par_iter_mut().enumerate().for_each(|(i, x)| {
|
||||
*x = data[i] + (data[i + half_len] - data[i]) * point;
|
||||
});
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Sample a random list of multilinear polynomials.
|
||||
/// Returns
|
||||
/// - the list of polynomials,
|
||||
/// - its sum of polynomial evaluations over the boolean hypercube.
|
||||
#[cfg(test)]
|
||||
pub fn random_mle_list<F: PrimeField, R: RngCore>(
|
||||
nv: usize,
|
||||
degree: usize,
|
||||
rng: &mut R,
|
||||
) -> (Vec<Arc<DenseMultilinearExtension<F>>>, F) {
|
||||
let start = start_timer!(|| "sample random mle list");
|
||||
let mut multiplicands = Vec::with_capacity(degree);
|
||||
for _ in 0..degree {
|
||||
multiplicands.push(Vec::with_capacity(1 << nv))
|
||||
}
|
||||
let mut sum = F::zero();
|
||||
|
||||
for _ in 0..(1 << nv) {
|
||||
let mut product = F::one();
|
||||
|
||||
for e in multiplicands.iter_mut() {
|
||||
let val = F::rand(rng);
|
||||
e.push(val);
|
||||
product *= val;
|
||||
}
|
||||
sum += product;
|
||||
}
|
||||
|
||||
let list = multiplicands
|
||||
.into_iter()
|
||||
.map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
|
||||
.collect();
|
||||
|
||||
end_timer!(start);
|
||||
(list, sum)
|
||||
}
|
||||
|
||||
// Build a randomize list of mle-s whose sum is zero.
|
||||
#[cfg(test)]
|
||||
pub fn random_zero_mle_list<F: PrimeField, R: RngCore>(
|
||||
nv: usize,
|
||||
degree: usize,
|
||||
rng: &mut R,
|
||||
) -> Vec<Arc<DenseMultilinearExtension<F>>> {
|
||||
let start = start_timer!(|| "sample random zero mle list");
|
||||
|
||||
let mut multiplicands = Vec::with_capacity(degree);
|
||||
for _ in 0..degree {
|
||||
multiplicands.push(Vec::with_capacity(1 << nv))
|
||||
}
|
||||
for _ in 0..(1 << nv) {
|
||||
multiplicands[0].push(F::zero());
|
||||
for e in multiplicands.iter_mut().skip(1) {
|
||||
e.push(F::rand(rng));
|
||||
}
|
||||
}
|
||||
|
||||
let list = multiplicands
|
||||
.into_iter()
|
||||
.map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
|
||||
.collect();
|
||||
|
||||
end_timer!(start);
|
||||
list
|
||||
}
|
||||
}
|
||||
252
folding-schemes/src/utils/espresso/sum_check/mod.rs
Normal file
252
folding-schemes/src/utils/espresso/sum_check/mod.rs
Normal file
@@ -0,0 +1,252 @@
|
||||
// code forked from:
|
||||
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
//! This module implements the sum check protocol.
|
||||
|
||||
use crate::{
|
||||
transcript::Transcript,
|
||||
utils::virtual_polynomial::{VPAuxInfo, VirtualPolynomial},
|
||||
};
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::{DenseMultilinearExtension, DenseUVPolynomial, Polynomial};
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use std::{fmt::Debug, marker::PhantomData, sync::Arc};
|
||||
|
||||
use crate::utils::sum_check::structs::IOPProverMessage;
|
||||
use crate::utils::sum_check::structs::IOPVerifierState;
|
||||
use ark_ff::Field;
|
||||
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
|
||||
use structs::{IOPProof, IOPProverState};
|
||||
|
||||
mod prover;
|
||||
pub mod structs;
|
||||
pub mod verifier;
|
||||
|
||||
/// A generic sum-check trait over a curve group
|
||||
pub trait SumCheck<C: CurveGroup> {
|
||||
type VirtualPolynomial;
|
||||
type VPAuxInfo;
|
||||
type MultilinearExtension;
|
||||
|
||||
type SumCheckProof: Clone + Debug + Default + PartialEq;
|
||||
type SumCheckSubClaim: Clone + Debug + Default + PartialEq;
|
||||
|
||||
/// Extract sum from the proof
|
||||
fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField;
|
||||
|
||||
/// Generate proof of the sum of polynomial over {0,1}^`num_vars`
|
||||
///
|
||||
/// The polynomial is represented in the form of a VirtualPolynomial.
|
||||
fn prove(
|
||||
poly: &Self::VirtualPolynomial,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<Self::SumCheckProof, PolyIOPErrors>;
|
||||
|
||||
/// Verify the claimed sum using the proof
|
||||
fn verify(
|
||||
sum: C::ScalarField,
|
||||
proof: &Self::SumCheckProof,
|
||||
aux_info: &Self::VPAuxInfo,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
|
||||
}
|
||||
|
||||
/// Trait for sum check protocol prover side APIs.
|
||||
pub trait SumCheckProver<C: CurveGroup>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
type VirtualPolynomial;
|
||||
type ProverMessage;
|
||||
|
||||
/// Initialize the prover state to argue for the sum of the input polynomial
|
||||
/// over {0,1}^`num_vars`.
|
||||
fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors>;
|
||||
|
||||
/// Receive message from verifier, generate prover message, and proceed to
|
||||
/// next round.
|
||||
///
|
||||
/// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
|
||||
fn prove_round_and_update_state(
|
||||
&mut self,
|
||||
challenge: &Option<C::ScalarField>,
|
||||
) -> Result<Self::ProverMessage, PolyIOPErrors>;
|
||||
}
|
||||
|
||||
/// Trait for sum check protocol verifier side APIs.
|
||||
pub trait SumCheckVerifier<C: CurveGroup> {
|
||||
type VPAuxInfo;
|
||||
type ProverMessage;
|
||||
type Challenge;
|
||||
type SumCheckSubClaim;
|
||||
|
||||
/// Initialize the verifier's state.
|
||||
fn verifier_init(index_info: &Self::VPAuxInfo) -> Self;
|
||||
|
||||
/// Run verifier for the current round, given a prover message.
|
||||
///
|
||||
/// Note that `verify_round_and_update_state` only samples and stores
|
||||
/// challenges; and update the verifier's state accordingly. The actual
|
||||
/// verifications are deferred (in batch) to `check_and_generate_subclaim`
|
||||
/// at the last step.
|
||||
fn verify_round_and_update_state(
|
||||
&mut self,
|
||||
prover_msg: &Self::ProverMessage,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<Self::Challenge, PolyIOPErrors>;
|
||||
|
||||
/// This function verifies the deferred checks in the interactive version of
|
||||
/// the protocol; and generate the subclaim. Returns an error if the
|
||||
/// proof failed to verify.
|
||||
///
|
||||
/// If the asserted sum is correct, then the multilinear polynomial
|
||||
/// evaluated at `subclaim.point` will be `subclaim.expected_evaluation`.
|
||||
/// Otherwise, it is highly unlikely that those two will be equal.
|
||||
/// Larger field size guarantees smaller soundness error.
|
||||
fn check_and_generate_subclaim(
|
||||
&self,
|
||||
asserted_sum: &C::ScalarField,
|
||||
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
|
||||
}
|
||||
|
||||
/// A SumCheckSubClaim is a claim generated by the verifier at the end of
|
||||
/// verification when it is convinced.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq)]
|
||||
pub struct SumCheckSubClaim<F: PrimeField> {
|
||||
/// the multi-dimensional point that this multilinear extension is evaluated
|
||||
/// to
|
||||
pub point: Vec<F>,
|
||||
/// the expected evaluation
|
||||
pub expected_evaluation: F,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Copy, PartialEq, Eq)]
|
||||
pub struct IOPSumCheck<C: CurveGroup, T: Transcript<C>> {
|
||||
#[doc(hidden)]
|
||||
phantom: PhantomData<C>,
|
||||
#[doc(hidden)]
|
||||
phantom2: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<C: CurveGroup, T: Transcript<C>> SumCheck<C> for IOPSumCheck<C, T> {
|
||||
type SumCheckProof = IOPProof<C::ScalarField>;
|
||||
type VirtualPolynomial = VirtualPolynomial<C::ScalarField>;
|
||||
type VPAuxInfo = VPAuxInfo<C::ScalarField>;
|
||||
type MultilinearExtension = Arc<DenseMultilinearExtension<C::ScalarField>>;
|
||||
type SumCheckSubClaim = SumCheckSubClaim<C::ScalarField>;
|
||||
|
||||
fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField {
|
||||
let start = start_timer!(|| "extract sum");
|
||||
let poly = DensePolynomial::from_coefficients_vec(proof.proofs[0].coeffs.clone());
|
||||
let res = poly.evaluate(&C::ScalarField::ONE) + poly.evaluate(&C::ScalarField::ZERO);
|
||||
end_timer!(start);
|
||||
res
|
||||
}
|
||||
|
||||
fn prove(
|
||||
poly: &VirtualPolynomial<C::ScalarField>,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<IOPProof<C::ScalarField>, PolyIOPErrors> {
|
||||
transcript.absorb(&C::ScalarField::from(poly.aux_info.num_variables as u64));
|
||||
transcript.absorb(&C::ScalarField::from(poly.aux_info.max_degree as u64));
|
||||
let mut prover_state: IOPProverState<C> = IOPProverState::prover_init(poly)?;
|
||||
let mut challenge: Option<C::ScalarField> = None;
|
||||
let mut prover_msgs: Vec<IOPProverMessage<C::ScalarField>> =
|
||||
Vec::with_capacity(poly.aux_info.num_variables);
|
||||
for _ in 0..poly.aux_info.num_variables {
|
||||
let prover_msg: IOPProverMessage<C::ScalarField> =
|
||||
IOPProverState::prove_round_and_update_state(&mut prover_state, &challenge)?;
|
||||
transcript.absorb_vec(&prover_msg.coeffs);
|
||||
prover_msgs.push(prover_msg);
|
||||
challenge = Some(transcript.get_challenge());
|
||||
}
|
||||
if let Some(p) = challenge {
|
||||
prover_state.challenges.push(p)
|
||||
};
|
||||
Ok(IOPProof {
|
||||
point: prover_state.challenges,
|
||||
proofs: prover_msgs,
|
||||
})
|
||||
}
|
||||
|
||||
fn verify(
|
||||
claimed_sum: C::ScalarField,
|
||||
proof: &IOPProof<C::ScalarField>,
|
||||
aux_info: &VPAuxInfo<C::ScalarField>,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<SumCheckSubClaim<C::ScalarField>, PolyIOPErrors> {
|
||||
transcript.absorb(&C::ScalarField::from(aux_info.num_variables as u64));
|
||||
transcript.absorb(&C::ScalarField::from(aux_info.max_degree as u64));
|
||||
let mut verifier_state = IOPVerifierState::verifier_init(aux_info);
|
||||
for i in 0..aux_info.num_variables {
|
||||
let prover_msg = proof.proofs.get(i).expect("proof is incomplete");
|
||||
transcript.absorb_vec(&prover_msg.coeffs);
|
||||
IOPVerifierState::verify_round_and_update_state(
|
||||
&mut verifier_state,
|
||||
prover_msg,
|
||||
transcript,
|
||||
)?;
|
||||
}
|
||||
|
||||
IOPVerifierState::check_and_generate_subclaim(&verifier_state, &claimed_sum)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use ark_ff::Field;
|
||||
use ark_pallas::Fr;
|
||||
use ark_pallas::Projective;
|
||||
use ark_poly::DenseMultilinearExtension;
|
||||
use ark_poly::MultilinearExtension;
|
||||
use ark_std::test_rng;
|
||||
|
||||
use crate::transcript::poseidon::poseidon_test_config;
|
||||
use crate::transcript::poseidon::PoseidonTranscript;
|
||||
use crate::transcript::Transcript;
|
||||
use crate::utils::sum_check::SumCheck;
|
||||
use crate::utils::virtual_polynomial::VirtualPolynomial;
|
||||
|
||||
use super::IOPSumCheck;
|
||||
|
||||
#[test]
|
||||
pub fn sumcheck_poseidon() {
|
||||
let mut rng = test_rng();
|
||||
let poly_mle = DenseMultilinearExtension::rand(5, &mut rng);
|
||||
let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE);
|
||||
let poseidon_config = poseidon_test_config::<Fr>();
|
||||
|
||||
// sum-check prove
|
||||
let mut poseidon_transcript_prove: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
let sum_check = IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::prove(
|
||||
&virtual_poly,
|
||||
&mut poseidon_transcript_prove,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// sum-check verify
|
||||
let claimed_sum =
|
||||
IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::extract_sum(&sum_check);
|
||||
let mut poseidon_transcript_verify: PoseidonTranscript<Projective> =
|
||||
PoseidonTranscript::<Projective>::new(&poseidon_config);
|
||||
let res_verify = IOPSumCheck::<Projective, PoseidonTranscript<Projective>>::verify(
|
||||
claimed_sum,
|
||||
&sum_check,
|
||||
&virtual_poly.aux_info,
|
||||
&mut poseidon_transcript_verify,
|
||||
);
|
||||
|
||||
assert!(res_verify.is_ok());
|
||||
}
|
||||
}
|
||||
228
folding-schemes/src/utils/espresso/sum_check/prover.rs
Normal file
228
folding-schemes/src/utils/espresso/sum_check/prover.rs
Normal file
@@ -0,0 +1,228 @@
|
||||
// code forked from:
|
||||
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
//! Prover subroutines for a SumCheck protocol.
|
||||
|
||||
use super::SumCheckProver;
|
||||
use crate::utils::{
|
||||
lagrange_poly::compute_lagrange_interpolated_poly, multilinear_polynomial::fix_variables,
|
||||
virtual_polynomial::VirtualPolynomial,
|
||||
};
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::Field;
|
||||
use ark_ff::{batch_inversion, PrimeField};
|
||||
use ark_poly::DenseMultilinearExtension;
|
||||
use ark_std::{cfg_into_iter, end_timer, start_timer, vec::Vec};
|
||||
use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator};
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::structs::{IOPProverMessage, IOPProverState};
|
||||
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
|
||||
|
||||
// #[cfg(feature = "parallel")]
|
||||
use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
|
||||
|
||||
impl<C: CurveGroup> SumCheckProver<C> for IOPProverState<C> {
|
||||
type VirtualPolynomial = VirtualPolynomial<C::ScalarField>;
|
||||
type ProverMessage = IOPProverMessage<C::ScalarField>;
|
||||
|
||||
/// Initialize the prover state to argue for the sum of the input polynomial
|
||||
/// over {0,1}^`num_vars`.
|
||||
fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors> {
|
||||
let start = start_timer!(|| "sum check prover init");
|
||||
if polynomial.aux_info.num_variables == 0 {
|
||||
return Err(PolyIOPErrors::InvalidParameters(
|
||||
"Attempt to prove a constant.".to_string(),
|
||||
));
|
||||
}
|
||||
end_timer!(start);
|
||||
|
||||
Ok(Self {
|
||||
challenges: Vec::with_capacity(polynomial.aux_info.num_variables),
|
||||
round: 0,
|
||||
poly: polynomial.clone(),
|
||||
extrapolation_aux: (1..polynomial.aux_info.max_degree)
|
||||
.map(|degree| {
|
||||
let points = (0..1 + degree as u64)
|
||||
.map(C::ScalarField::from)
|
||||
.collect::<Vec<_>>();
|
||||
let weights = barycentric_weights(&points);
|
||||
(points, weights)
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Receive message from verifier, generate prover message, and proceed to
|
||||
/// next round.
|
||||
///
|
||||
/// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
|
||||
fn prove_round_and_update_state(
|
||||
&mut self,
|
||||
challenge: &Option<C::ScalarField>,
|
||||
) -> Result<Self::ProverMessage, PolyIOPErrors> {
|
||||
// let start =
|
||||
// start_timer!(|| format!("sum check prove {}-th round and update state",
|
||||
// self.round));
|
||||
|
||||
if self.round >= self.poly.aux_info.num_variables {
|
||||
return Err(PolyIOPErrors::InvalidProver(
|
||||
"Prover is not active".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// let fix_argument = start_timer!(|| "fix argument");
|
||||
|
||||
// Step 1:
|
||||
// fix argument and evaluate f(x) over x_m = r; where r is the challenge
|
||||
// for the current round, and m is the round number, indexed from 1
|
||||
//
|
||||
// i.e.:
|
||||
// at round m <= n, for each mle g(x_1, ... x_n) within the flattened_mle
|
||||
// which has already been evaluated to
|
||||
//
|
||||
// g(r_1, ..., r_{m-1}, x_m ... x_n)
|
||||
//
|
||||
// eval g over r_m, and mutate g to g(r_1, ... r_m,, x_{m+1}... x_n)
|
||||
let mut flattened_ml_extensions: Vec<DenseMultilinearExtension<C::ScalarField>> = self
|
||||
.poly
|
||||
.flattened_ml_extensions
|
||||
.par_iter()
|
||||
.map(|x| x.as_ref().clone())
|
||||
.collect();
|
||||
|
||||
if let Some(chal) = challenge {
|
||||
if self.round == 0 {
|
||||
return Err(PolyIOPErrors::InvalidProver(
|
||||
"first round should be prover first.".to_string(),
|
||||
));
|
||||
}
|
||||
self.challenges.push(*chal);
|
||||
|
||||
let r = self.challenges[self.round - 1];
|
||||
// #[cfg(feature = "parallel")]
|
||||
flattened_ml_extensions
|
||||
.par_iter_mut()
|
||||
.for_each(|mle| *mle = fix_variables(mle, &[r]));
|
||||
// #[cfg(not(feature = "parallel"))]
|
||||
// flattened_ml_extensions
|
||||
// .iter_mut()
|
||||
// .for_each(|mle| *mle = fix_variables(mle, &[r]));
|
||||
} else if self.round > 0 {
|
||||
return Err(PolyIOPErrors::InvalidProver(
|
||||
"verifier message is empty".to_string(),
|
||||
));
|
||||
}
|
||||
// end_timer!(fix_argument);
|
||||
|
||||
self.round += 1;
|
||||
|
||||
let products_list = self.poly.products.clone();
|
||||
let mut products_sum = vec![C::ScalarField::ZERO; self.poly.aux_info.max_degree + 1];
|
||||
|
||||
// Step 2: generate sum for the partial evaluated polynomial:
|
||||
// f(r_1, ... r_m,, x_{m+1}... x_n)
|
||||
|
||||
products_list.iter().for_each(|(coefficient, products)| {
|
||||
let mut sum = cfg_into_iter!(0..1 << (self.poly.aux_info.num_variables - self.round))
|
||||
.fold(
|
||||
|| {
|
||||
(
|
||||
vec![(C::ScalarField::ZERO, C::ScalarField::ZERO); products.len()],
|
||||
vec![C::ScalarField::ZERO; products.len() + 1],
|
||||
)
|
||||
},
|
||||
|(mut buf, mut acc), b| {
|
||||
buf.iter_mut()
|
||||
.zip(products.iter())
|
||||
.for_each(|((eval, step), f)| {
|
||||
let table = &flattened_ml_extensions[*f];
|
||||
*eval = table[b << 1];
|
||||
*step = table[(b << 1) + 1] - table[b << 1];
|
||||
});
|
||||
acc[0] += buf.iter().map(|(eval, _)| eval).product::<C::ScalarField>();
|
||||
acc[1..].iter_mut().for_each(|acc| {
|
||||
buf.iter_mut().for_each(|(eval, step)| *eval += step as &_);
|
||||
*acc += buf.iter().map(|(eval, _)| eval).product::<C::ScalarField>();
|
||||
});
|
||||
(buf, acc)
|
||||
},
|
||||
)
|
||||
.map(|(_, partial)| partial)
|
||||
.reduce(
|
||||
|| vec![C::ScalarField::ZERO; products.len() + 1],
|
||||
|mut sum, partial| {
|
||||
sum.iter_mut()
|
||||
.zip(partial.iter())
|
||||
.for_each(|(sum, partial)| *sum += partial);
|
||||
sum
|
||||
},
|
||||
);
|
||||
sum.iter_mut().for_each(|sum| *sum *= coefficient);
|
||||
let extraploation = cfg_into_iter!(0..self.poly.aux_info.max_degree - products.len())
|
||||
.map(|i| {
|
||||
let (points, weights) = &self.extrapolation_aux[products.len() - 1];
|
||||
let at = C::ScalarField::from((products.len() + 1 + i) as u64);
|
||||
extrapolate(points, weights, &sum, &at)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
products_sum
|
||||
.iter_mut()
|
||||
.zip(sum.iter().chain(extraploation.iter()))
|
||||
.for_each(|(products_sum, sum)| *products_sum += sum);
|
||||
});
|
||||
|
||||
// update prover's state to the partial evaluated polynomial
|
||||
self.poly.flattened_ml_extensions = flattened_ml_extensions
|
||||
.par_iter()
|
||||
.map(|x| Arc::new(x.clone()))
|
||||
.collect();
|
||||
|
||||
let prover_poly = compute_lagrange_interpolated_poly::<C::ScalarField>(&products_sum);
|
||||
Ok(IOPProverMessage {
|
||||
coeffs: prover_poly.coeffs,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::filter_map_bool_then)]
|
||||
fn barycentric_weights<F: PrimeField>(points: &[F]) -> Vec<F> {
|
||||
let mut weights = points
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(j, point_j)| {
|
||||
points
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(i, point_i)| (i != j).then(|| *point_j - point_i))
|
||||
.reduce(|acc, value| acc * value)
|
||||
.unwrap_or_else(F::one)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
batch_inversion(&mut weights);
|
||||
weights
|
||||
}
|
||||
|
||||
fn extrapolate<F: PrimeField>(points: &[F], weights: &[F], evals: &[F], at: &F) -> F {
|
||||
let (coeffs, sum_inv) = {
|
||||
let mut coeffs = points.iter().map(|point| *at - point).collect::<Vec<_>>();
|
||||
batch_inversion(&mut coeffs);
|
||||
coeffs.iter_mut().zip(weights).for_each(|(coeff, weight)| {
|
||||
*coeff *= weight;
|
||||
});
|
||||
let sum_inv = coeffs.iter().sum::<F>().inverse().unwrap_or_default();
|
||||
(coeffs, sum_inv)
|
||||
};
|
||||
coeffs
|
||||
.iter()
|
||||
.zip(evals)
|
||||
.map(|(coeff, eval)| *coeff * eval)
|
||||
.sum::<F>()
|
||||
* sum_inv
|
||||
}
|
||||
60
folding-schemes/src/utils/espresso/sum_check/structs.rs
Normal file
60
folding-schemes/src/utils/espresso/sum_check/structs.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
// code forked from:
|
||||
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
//! This module defines structs that are shared by all sub protocols.
|
||||
|
||||
use crate::utils::virtual_polynomial::VirtualPolynomial;
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_serialize::CanonicalSerialize;
|
||||
|
||||
/// An IOP proof is a collections of
|
||||
/// - messages from prover to verifier at each round through the interactive
|
||||
/// protocol.
|
||||
/// - a point that is generated by the transcript for evaluation
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq)]
|
||||
pub struct IOPProof<F: PrimeField> {
|
||||
pub point: Vec<F>,
|
||||
pub proofs: Vec<IOPProverMessage<F>>,
|
||||
}
|
||||
|
||||
/// A message from the prover to the verifier at a given round
|
||||
/// is a list of coeffs.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)]
|
||||
pub struct IOPProverMessage<F: PrimeField> {
|
||||
pub(crate) coeffs: Vec<F>,
|
||||
}
|
||||
|
||||
/// Prover State of a PolyIOP.
|
||||
#[derive(Debug)]
|
||||
pub struct IOPProverState<C: CurveGroup> {
|
||||
/// sampled randomness given by the verifier
|
||||
pub challenges: Vec<C::ScalarField>,
|
||||
/// the current round number
|
||||
pub(crate) round: usize,
|
||||
/// pointer to the virtual polynomial
|
||||
pub(crate) poly: VirtualPolynomial<C::ScalarField>,
|
||||
/// points with precomputed barycentric weights for extrapolating smaller
|
||||
/// degree uni-polys to `max_degree + 1` evaluations.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub(crate) extrapolation_aux: Vec<(Vec<C::ScalarField>, Vec<C::ScalarField>)>,
|
||||
}
|
||||
|
||||
/// Verifier State of a PolyIOP, generic over a curve group
|
||||
#[derive(Debug)]
|
||||
pub struct IOPVerifierState<C: CurveGroup> {
|
||||
pub(crate) round: usize,
|
||||
pub(crate) num_vars: usize,
|
||||
pub(crate) finished: bool,
|
||||
/// a list storing the univariate polynomial in evaluation form sent by the
|
||||
/// prover at each round
|
||||
pub(crate) polynomials_received: Vec<Vec<C::ScalarField>>,
|
||||
/// a list storing the randomness sampled by the verifier at each round
|
||||
pub(crate) challenges: Vec<C::ScalarField>,
|
||||
}
|
||||
300
folding-schemes/src/utils/espresso/sum_check/verifier.rs
Normal file
300
folding-schemes/src/utils/espresso/sum_check/verifier.rs
Normal file
@@ -0,0 +1,300 @@
|
||||
// code forked from:
|
||||
// https://github.com/EspressoSystems/hyperplonk/tree/main/subroutines/src/poly_iop/sum_check
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
//! Verifier subroutines for a SumCheck protocol.
|
||||
|
||||
use super::{
|
||||
structs::{IOPProverMessage, IOPVerifierState},
|
||||
SumCheckSubClaim, SumCheckVerifier,
|
||||
};
|
||||
use crate::{transcript::Transcript, utils::virtual_polynomial::VPAuxInfo};
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_poly::Polynomial;
|
||||
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
|
||||
|
||||
impl<C: CurveGroup> SumCheckVerifier<C> for IOPVerifierState<C> {
|
||||
type VPAuxInfo = VPAuxInfo<C::ScalarField>;
|
||||
type ProverMessage = IOPProverMessage<C::ScalarField>;
|
||||
type Challenge = C::ScalarField;
|
||||
type SumCheckSubClaim = SumCheckSubClaim<C::ScalarField>;
|
||||
|
||||
/// Initialize the verifier's state.
|
||||
fn verifier_init(index_info: &Self::VPAuxInfo) -> Self {
|
||||
let start = start_timer!(|| "sum check verifier init");
|
||||
let res = Self {
|
||||
round: 1,
|
||||
num_vars: index_info.num_variables,
|
||||
finished: false,
|
||||
polynomials_received: Vec::with_capacity(index_info.num_variables),
|
||||
challenges: Vec::with_capacity(index_info.num_variables),
|
||||
};
|
||||
end_timer!(start);
|
||||
res
|
||||
}
|
||||
|
||||
fn verify_round_and_update_state(
|
||||
&mut self,
|
||||
prover_msg: &<IOPVerifierState<C> as SumCheckVerifier<C>>::ProverMessage,
|
||||
transcript: &mut impl Transcript<C>,
|
||||
) -> Result<<IOPVerifierState<C> as SumCheckVerifier<C>>::Challenge, PolyIOPErrors> {
|
||||
let start =
|
||||
start_timer!(|| format!("sum check verify {}-th round and update state", self.round));
|
||||
|
||||
if self.finished {
|
||||
return Err(PolyIOPErrors::InvalidVerifier(
|
||||
"Incorrect verifier state: Verifier is already finished.".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// In an interactive protocol, the verifier should
|
||||
//
|
||||
// 1. check if the received 'P(0) + P(1) = expected`.
|
||||
// 2. set `expected` to P(r)`
|
||||
//
|
||||
// When we turn the protocol to a non-interactive one, it is sufficient to defer
|
||||
// such checks to `check_and_generate_subclaim` after the last round.
|
||||
let challenge = transcript.get_challenge();
|
||||
self.challenges.push(challenge);
|
||||
self.polynomials_received.push(prover_msg.coeffs.to_vec());
|
||||
|
||||
if self.round == self.num_vars {
|
||||
// accept and close
|
||||
self.finished = true;
|
||||
} else {
|
||||
// proceed to the next round
|
||||
self.round += 1;
|
||||
}
|
||||
|
||||
end_timer!(start);
|
||||
Ok(challenge)
|
||||
}
|
||||
|
||||
fn check_and_generate_subclaim(
|
||||
&self,
|
||||
asserted_sum: &C::ScalarField,
|
||||
) -> Result<Self::SumCheckSubClaim, PolyIOPErrors> {
|
||||
let start = start_timer!(|| "sum check check and generate subclaim");
|
||||
if !self.finished {
|
||||
return Err(PolyIOPErrors::InvalidVerifier(
|
||||
"Incorrect verifier state: Verifier has not finished.".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if self.polynomials_received.len() != self.num_vars {
|
||||
return Err(PolyIOPErrors::InvalidVerifier(
|
||||
"insufficient rounds".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// the deferred check during the interactive phase:
|
||||
// 2. set `expected` to P(r)`
|
||||
#[cfg(feature = "parallel")]
|
||||
let mut expected_vec = self
|
||||
.polynomials_received
|
||||
.clone()
|
||||
.into_par_iter()
|
||||
.zip(self.challenges.clone().into_par_iter())
|
||||
.map(|(coeffs, challenge)| {
|
||||
// Removed check on number of evaluations here since verifier receives polynomial in coeffs form
|
||||
let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs);
|
||||
Ok(prover_poly.evaluate(&challenge))
|
||||
})
|
||||
.collect::<Result<Vec<_>, PolyIOPErrors>>()?;
|
||||
|
||||
#[cfg(not(feature = "parallel"))]
|
||||
let mut expected_vec = self
|
||||
.polynomials_received
|
||||
.clone()
|
||||
.into_iter()
|
||||
.zip(self.challenges.clone().into_iter())
|
||||
.map(|(evaluations, challenge)| {
|
||||
// Removed check on number of evaluations here since verifier receives polynomial in coeffs form
|
||||
let prover_poly = DensePolynomial::from_coefficients_slice(&coeffs);
|
||||
Ok(prover_poly.evaluate(&challenge))
|
||||
})
|
||||
.collect::<Result<Vec<_>, PolyIOPErrors>>()?;
|
||||
|
||||
// insert the asserted_sum to the first position of the expected vector
|
||||
expected_vec.insert(0, *asserted_sum);
|
||||
|
||||
for (coeffs, &expected) in self
|
||||
.polynomials_received
|
||||
.iter()
|
||||
.zip(expected_vec.iter())
|
||||
.take(self.num_vars)
|
||||
{
|
||||
let poly = DensePolynomial::from_coefficients_slice(coeffs);
|
||||
let eval_at_one: C::ScalarField = poly.iter().sum();
|
||||
let eval_at_zero: C::ScalarField = poly.coeffs[0];
|
||||
let eval = eval_at_one + eval_at_zero;
|
||||
|
||||
println!("evaluations: {:?}, expected: {:?}", eval, expected);
|
||||
// the deferred check during the interactive phase:
|
||||
// 1. check if the received 'P(0) + P(1) = expected`.
|
||||
if eval != expected {
|
||||
return Err(PolyIOPErrors::InvalidProof(
|
||||
"Prover message is not consistent with the claim.".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
end_timer!(start);
|
||||
Ok(SumCheckSubClaim {
|
||||
point: self.challenges.clone(),
|
||||
// the last expected value (not checked within this function) will be included in the
|
||||
// subclaim
|
||||
expected_evaluation: expected_vec[self.num_vars],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpolate a uni-variate degree-`p_i.len()-1` polynomial and evaluate this
|
||||
/// polynomial at `eval_at`:
|
||||
///
|
||||
/// \sum_{i=0}^len p_i * (\prod_{j!=i} (eval_at - j)/(i-j) )
|
||||
///
|
||||
/// This implementation is linear in number of inputs in terms of field
|
||||
/// operations. It also has a quadratic term in primitive operations which is
|
||||
/// negligible compared to field operations.
|
||||
/// TODO: The quadratic term can be removed by precomputing the lagrange
|
||||
/// coefficients.
|
||||
pub fn interpolate_uni_poly<F: PrimeField>(p_i: &[F], eval_at: F) -> Result<F, PolyIOPErrors> {
|
||||
let start = start_timer!(|| "sum check interpolate uni poly opt");
|
||||
|
||||
let len = p_i.len();
|
||||
let mut evals = vec![];
|
||||
let mut prod = eval_at;
|
||||
evals.push(eval_at);
|
||||
|
||||
// `prod = \prod_{j} (eval_at - j)`
|
||||
for e in 1..len {
|
||||
let tmp = eval_at - F::from(e as u64);
|
||||
evals.push(tmp);
|
||||
prod *= tmp;
|
||||
}
|
||||
let mut res = F::zero();
|
||||
// we want to compute \prod (j!=i) (i-j) for a given i
|
||||
//
|
||||
// we start from the last step, which is
|
||||
// denom[len-1] = (len-1) * (len-2) *... * 2 * 1
|
||||
// the step before that is
|
||||
// denom[len-2] = (len-2) * (len-3) * ... * 2 * 1 * -1
|
||||
// and the step before that is
|
||||
// denom[len-3] = (len-3) * (len-4) * ... * 2 * 1 * -1 * -2
|
||||
//
|
||||
// i.e., for any i, the one before this will be derived from
|
||||
// denom[i-1] = denom[i] * (len-i) / i
|
||||
//
|
||||
// that is, we only need to store
|
||||
// - the last denom for i = len-1, and
|
||||
// - the ratio between current step and fhe last step, which is the product of
|
||||
// (len-i) / i from all previous steps and we store this product as a fraction
|
||||
// number to reduce field divisions.
|
||||
|
||||
// We know
|
||||
// - 2^61 < factorial(20) < 2^62
|
||||
// - 2^122 < factorial(33) < 2^123
|
||||
// so we will be able to compute the ratio
|
||||
// - for len <= 20 with i64
|
||||
// - for len <= 33 with i128
|
||||
// - for len > 33 with BigInt
|
||||
if p_i.len() <= 20 {
|
||||
let last_denominator = F::from(u64_factorial(len - 1));
|
||||
let mut ratio_numerator = 1i64;
|
||||
let mut ratio_denominator = 1u64;
|
||||
|
||||
for i in (0..len).rev() {
|
||||
let ratio_numerator_f = if ratio_numerator < 0 {
|
||||
-F::from((-ratio_numerator) as u64)
|
||||
} else {
|
||||
F::from(ratio_numerator as u64)
|
||||
};
|
||||
|
||||
res += p_i[i] * prod * F::from(ratio_denominator)
|
||||
/ (last_denominator * ratio_numerator_f * evals[i]);
|
||||
|
||||
// compute denom for the next step is current_denom * (len-i)/i
|
||||
if i != 0 {
|
||||
ratio_numerator *= -(len as i64 - i as i64);
|
||||
ratio_denominator *= i as u64;
|
||||
}
|
||||
}
|
||||
} else if p_i.len() <= 33 {
|
||||
let last_denominator = F::from(u128_factorial(len - 1));
|
||||
let mut ratio_numerator = 1i128;
|
||||
let mut ratio_denominator = 1u128;
|
||||
|
||||
for i in (0..len).rev() {
|
||||
let ratio_numerator_f = if ratio_numerator < 0 {
|
||||
-F::from((-ratio_numerator) as u128)
|
||||
} else {
|
||||
F::from(ratio_numerator as u128)
|
||||
};
|
||||
|
||||
res += p_i[i] * prod * F::from(ratio_denominator)
|
||||
/ (last_denominator * ratio_numerator_f * evals[i]);
|
||||
|
||||
// compute denom for the next step is current_denom * (len-i)/i
|
||||
if i != 0 {
|
||||
ratio_numerator *= -(len as i128 - i as i128);
|
||||
ratio_denominator *= i as u128;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut denom_up = field_factorial::<F>(len - 1);
|
||||
let mut denom_down = F::one();
|
||||
|
||||
for i in (0..len).rev() {
|
||||
res += p_i[i] * prod * denom_down / (denom_up * evals[i]);
|
||||
|
||||
// compute denom for the next step is current_denom * (len-i)/i
|
||||
if i != 0 {
|
||||
denom_up *= -F::from((len - i) as u64);
|
||||
denom_down *= F::from(i as u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
end_timer!(start);
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// compute the factorial(a) = 1 * 2 * ... * a
|
||||
#[inline]
|
||||
fn field_factorial<F: PrimeField>(a: usize) -> F {
|
||||
let mut res = F::one();
|
||||
for i in 2..=a {
|
||||
res *= F::from(i as u64);
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
/// compute the factorial(a) = 1 * 2 * ... * a
|
||||
#[inline]
|
||||
fn u128_factorial(a: usize) -> u128 {
|
||||
let mut res = 1u128;
|
||||
for i in 2..=a {
|
||||
res *= i as u128;
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
/// compute the factorial(a) = 1 * 2 * ... * a
|
||||
#[inline]
|
||||
fn u64_factorial(a: usize) -> u64 {
|
||||
let mut res = 1u64;
|
||||
for i in 2..=a {
|
||||
res *= i as u64;
|
||||
}
|
||||
res
|
||||
}
|
||||
550
folding-schemes/src/utils/espresso/virtual_polynomial.rs
Normal file
550
folding-schemes/src/utils/espresso/virtual_polynomial.rs
Normal file
@@ -0,0 +1,550 @@
|
||||
// code forked from
|
||||
// https://github.com/privacy-scaling-explorations/multifolding-poc/blob/main/src/espresso/virtual_polynomial.rs
|
||||
//
|
||||
// Copyright (c) 2023 Espresso Systems (espressosys.com)
|
||||
// This file is part of the HyperPlonk library.
|
||||
|
||||
// You should have received a copy of the MIT License
|
||||
// along with the HyperPlonk library. If not, see <https://mit-license.org/>.
|
||||
|
||||
//! This module defines our main mathematical object `VirtualPolynomial`; and
|
||||
//! various functions associated with it.
|
||||
|
||||
use ark_ff::PrimeField;
|
||||
use ark_poly::{DenseMultilinearExtension, MultilinearExtension};
|
||||
use ark_serialize::CanonicalSerialize;
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use rayon::prelude::*;
|
||||
use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc};
|
||||
use thiserror::Error;
|
||||
|
||||
use ark_std::string::String;
|
||||
|
||||
//-- aritherrors
|
||||
/// A `enum` specifying the possible failure modes of the arithmetics.
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ArithErrors {
|
||||
#[error("Invalid parameters: {0}")]
|
||||
InvalidParameters(String),
|
||||
#[error("Should not arrive to this point")]
|
||||
ShouldNotArrive,
|
||||
#[error("An error during (de)serialization: {0}")]
|
||||
SerializationErrors(ark_serialize::SerializationError),
|
||||
}
|
||||
|
||||
impl From<ark_serialize::SerializationError> for ArithErrors {
|
||||
fn from(e: ark_serialize::SerializationError) -> Self {
|
||||
Self::SerializationErrors(e)
|
||||
}
|
||||
}
|
||||
//-- aritherrors
|
||||
|
||||
#[rustfmt::skip]
|
||||
/// A virtual polynomial is a sum of products of multilinear polynomials;
|
||||
/// where the multilinear polynomials are stored via their multilinear
|
||||
/// extensions: `(coefficient, DenseMultilinearExtension)`
|
||||
///
|
||||
/// * Number of products n = `polynomial.products.len()`,
|
||||
/// * Number of multiplicands of ith product m_i =
|
||||
/// `polynomial.products[i].1.len()`,
|
||||
/// * Coefficient of ith product c_i = `polynomial.products[i].0`
|
||||
///
|
||||
/// The resulting polynomial is
|
||||
///
|
||||
/// $$ \sum_{i=0}^{n} c_i \cdot \prod_{j=0}^{m_i} P_{ij} $$
|
||||
///
|
||||
/// Example:
|
||||
/// f = c0 * f0 * f1 * f2 + c1 * f3 * f4
|
||||
/// where f0 ... f4 are multilinear polynomials
|
||||
///
|
||||
/// - flattened_ml_extensions stores the multilinear extension representation of
|
||||
/// f0, f1, f2, f3 and f4
|
||||
/// - products is
|
||||
/// \[
|
||||
/// (c0, \[0, 1, 2\]),
|
||||
/// (c1, \[3, 4\])
|
||||
/// \]
|
||||
/// - raw_pointers_lookup_table maps fi to i
|
||||
///
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
pub struct VirtualPolynomial<F: PrimeField> {
|
||||
/// Aux information about the multilinear polynomial
|
||||
pub aux_info: VPAuxInfo<F>,
|
||||
/// list of reference to products (as usize) of multilinear extension
|
||||
pub products: Vec<(F, Vec<usize>)>,
|
||||
/// Stores multilinear extensions in which product multiplicand can refer
|
||||
/// to.
|
||||
pub flattened_ml_extensions: Vec<Arc<DenseMultilinearExtension<F>>>,
|
||||
/// Pointers to the above poly extensions
|
||||
raw_pointers_lookup_table: HashMap<*const DenseMultilinearExtension<F>, usize>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)]
|
||||
/// Auxiliary information about the multilinear polynomial
|
||||
pub struct VPAuxInfo<F: PrimeField> {
|
||||
/// max number of multiplicands in each product
|
||||
pub max_degree: usize,
|
||||
/// number of variables of the polynomial
|
||||
pub num_variables: usize,
|
||||
/// Associated field
|
||||
#[doc(hidden)]
|
||||
pub phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: PrimeField> Add for &VirtualPolynomial<F> {
|
||||
type Output = VirtualPolynomial<F>;
|
||||
fn add(self, other: &VirtualPolynomial<F>) -> Self::Output {
|
||||
let start = start_timer!(|| "virtual poly add");
|
||||
let mut res = self.clone();
|
||||
for products in other.products.iter() {
|
||||
let cur: Vec<Arc<DenseMultilinearExtension<F>>> = products
|
||||
.1
|
||||
.iter()
|
||||
.map(|&x| other.flattened_ml_extensions[x].clone())
|
||||
.collect();
|
||||
|
||||
res.add_mle_list(cur, products.0)
|
||||
.expect("add product failed");
|
||||
}
|
||||
end_timer!(start);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: convert this into a trait
|
||||
impl<F: PrimeField> VirtualPolynomial<F> {
|
||||
/// Creates an empty virtual polynomial with `num_variables`.
|
||||
pub fn new(num_variables: usize) -> Self {
|
||||
VirtualPolynomial {
|
||||
aux_info: VPAuxInfo {
|
||||
max_degree: 0,
|
||||
num_variables,
|
||||
phantom: PhantomData,
|
||||
},
|
||||
products: Vec::new(),
|
||||
flattened_ml_extensions: Vec::new(),
|
||||
raw_pointers_lookup_table: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an new virtual polynomial from a MLE and its coefficient.
|
||||
pub fn new_from_mle(mle: &Arc<DenseMultilinearExtension<F>>, coefficient: F) -> Self {
|
||||
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(mle);
|
||||
let mut hm = HashMap::new();
|
||||
hm.insert(mle_ptr, 0);
|
||||
|
||||
VirtualPolynomial {
|
||||
aux_info: VPAuxInfo {
|
||||
// The max degree is the max degree of any individual variable
|
||||
max_degree: 1,
|
||||
num_variables: mle.num_vars,
|
||||
phantom: PhantomData,
|
||||
},
|
||||
// here `0` points to the first polynomial of `flattened_ml_extensions`
|
||||
products: vec![(coefficient, vec![0])],
|
||||
flattened_ml_extensions: vec![mle.clone()],
|
||||
raw_pointers_lookup_table: hm,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a product of list of multilinear extensions to self
|
||||
/// Returns an error if the list is empty, or the MLE has a different
|
||||
/// `num_vars` from self.
|
||||
///
|
||||
/// The MLEs will be multiplied together, and then multiplied by the scalar
|
||||
/// `coefficient`.
|
||||
pub fn add_mle_list(
|
||||
&mut self,
|
||||
mle_list: impl IntoIterator<Item = Arc<DenseMultilinearExtension<F>>>,
|
||||
coefficient: F,
|
||||
) -> Result<(), ArithErrors> {
|
||||
let mle_list: Vec<Arc<DenseMultilinearExtension<F>>> = mle_list.into_iter().collect();
|
||||
let mut indexed_product = Vec::with_capacity(mle_list.len());
|
||||
|
||||
if mle_list.is_empty() {
|
||||
return Err(ArithErrors::InvalidParameters(
|
||||
"input mle_list is empty".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len());
|
||||
|
||||
for mle in mle_list {
|
||||
if mle.num_vars != self.aux_info.num_variables {
|
||||
return Err(ArithErrors::InvalidParameters(format!(
|
||||
"product has a multiplicand with wrong number of variables {} vs {}",
|
||||
mle.num_vars, self.aux_info.num_variables
|
||||
)));
|
||||
}
|
||||
|
||||
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(&mle);
|
||||
if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) {
|
||||
indexed_product.push(*index)
|
||||
} else {
|
||||
let curr_index = self.flattened_ml_extensions.len();
|
||||
self.flattened_ml_extensions.push(mle.clone());
|
||||
self.raw_pointers_lookup_table.insert(mle_ptr, curr_index);
|
||||
indexed_product.push(curr_index);
|
||||
}
|
||||
}
|
||||
self.products.push((coefficient, indexed_product));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Multiple the current VirtualPolynomial by an MLE:
|
||||
/// - add the MLE to the MLE list;
|
||||
/// - multiple each product by MLE and its coefficient.
|
||||
/// Returns an error if the MLE has a different `num_vars` from self.
|
||||
pub fn mul_by_mle(
|
||||
&mut self,
|
||||
mle: Arc<DenseMultilinearExtension<F>>,
|
||||
coefficient: F,
|
||||
) -> Result<(), ArithErrors> {
|
||||
let start = start_timer!(|| "mul by mle");
|
||||
|
||||
if mle.num_vars != self.aux_info.num_variables {
|
||||
return Err(ArithErrors::InvalidParameters(format!(
|
||||
"product has a multiplicand with wrong number of variables {} vs {}",
|
||||
mle.num_vars, self.aux_info.num_variables
|
||||
)));
|
||||
}
|
||||
|
||||
let mle_ptr: *const DenseMultilinearExtension<F> = Arc::as_ptr(&mle);
|
||||
|
||||
// check if this mle already exists in the virtual polynomial
|
||||
let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) {
|
||||
Some(&p) => p,
|
||||
None => {
|
||||
self.raw_pointers_lookup_table
|
||||
.insert(mle_ptr, self.flattened_ml_extensions.len());
|
||||
self.flattened_ml_extensions.push(mle);
|
||||
self.flattened_ml_extensions.len() - 1
|
||||
}
|
||||
};
|
||||
|
||||
for (prod_coef, indices) in self.products.iter_mut() {
|
||||
// - add the MLE to the MLE list;
|
||||
// - multiple each product by MLE and its coefficient.
|
||||
indices.push(mle_index);
|
||||
*prod_coef *= coefficient;
|
||||
}
|
||||
|
||||
// increase the max degree by one as the MLE has degree 1.
|
||||
self.aux_info.max_degree += 1;
|
||||
end_timer!(start);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)`
|
||||
pub fn scalar_mul(&mut self, s: &F) {
|
||||
for (prod_coef, _) in self.products.iter_mut() {
|
||||
*prod_coef *= s;
|
||||
}
|
||||
}
|
||||
|
||||
/// Evaluate the virtual polynomial at point `point`.
|
||||
/// Returns an error is point.len() does not match `num_variables`.
|
||||
pub fn evaluate(&self, point: &[F]) -> Result<F, ArithErrors> {
|
||||
let start = start_timer!(|| "evaluation");
|
||||
|
||||
if self.aux_info.num_variables != point.len() {
|
||||
return Err(ArithErrors::InvalidParameters(format!(
|
||||
"wrong number of variables {} vs {}",
|
||||
self.aux_info.num_variables,
|
||||
point.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Evaluate all the MLEs at `point`
|
||||
let evals: Vec<F> = self
|
||||
.flattened_ml_extensions
|
||||
.iter()
|
||||
.map(|x| {
|
||||
x.evaluate(point).unwrap() // safe unwrap here since we have
|
||||
// already checked that num_var
|
||||
// matches
|
||||
})
|
||||
.collect();
|
||||
|
||||
let res = self
|
||||
.products
|
||||
.iter()
|
||||
.map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::<F>())
|
||||
.sum();
|
||||
|
||||
end_timer!(start);
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// Input poly f(x) and a random vector r, output
|
||||
// \hat f(x) = \sum_{x_i \in eval_x} f(x_i) eq(x, r)
|
||||
// where
|
||||
// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
|
||||
//
|
||||
// This function is used in ZeroCheck.
|
||||
pub fn build_f_hat(&self, r: &[F]) -> Result<Self, ArithErrors> {
|
||||
let start = start_timer!(|| "zero check build hat f");
|
||||
|
||||
if self.aux_info.num_variables != r.len() {
|
||||
return Err(ArithErrors::InvalidParameters(format!(
|
||||
"r.len() is different from number of variables: {} vs {}",
|
||||
r.len(),
|
||||
self.aux_info.num_variables
|
||||
)));
|
||||
}
|
||||
|
||||
let eq_x_r = build_eq_x_r(r)?;
|
||||
let mut res = self.clone();
|
||||
res.mul_by_mle(eq_x_r, F::one())?;
|
||||
|
||||
end_timer!(start);
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
/// Evaluate eq polynomial.
|
||||
pub fn eq_eval<F: PrimeField>(x: &[F], y: &[F]) -> Result<F, ArithErrors> {
|
||||
if x.len() != y.len() {
|
||||
return Err(ArithErrors::InvalidParameters(
|
||||
"x and y have different length".to_string(),
|
||||
));
|
||||
}
|
||||
let start = start_timer!(|| "eq_eval");
|
||||
let mut res = F::one();
|
||||
for (&xi, &yi) in x.iter().zip(y.iter()) {
|
||||
let xi_yi = xi * yi;
|
||||
res *= xi_yi + xi_yi - xi - yi + F::one();
|
||||
}
|
||||
end_timer!(start);
|
||||
Ok(res)
|
||||
}

/// This function builds the eq(x, r) polynomial for any given r.
///
/// Evaluate
///      eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
///      eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r<F: PrimeField>(r: &[F]) -> Result<Arc<DenseMultilinearExtension<F>>, ArithErrors> {
    let evals = build_eq_x_r_vec(r)?;
    let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals);

    Ok(Arc::new(mle))
}
/// This function builds the eq(x, r) polynomial for any given r, and outputs the
/// evaluation of eq(x, r) in its vector form.
///
/// Evaluate
///      eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
/// over r, which is
///      eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i))
fn build_eq_x_r_vec<F: PrimeField>(r: &[F]) -> Result<Vec<F>, ArithErrors> {
    // we build eq(x,r) from its evaluations
    // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars
    // for example, with num_vars = 4, x is a binary vector of 4, then
    //  0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3)
    //  1 0 0 0 -> r0     * (1-r1) * (1-r2) * (1-r3)
    //  0 1 0 0 -> (1-r0) * r1     * (1-r2) * (1-r3)
    //  1 1 0 0 -> r0     * r1     * (1-r2) * (1-r3)
    //  ....
    //  1 1 1 1 -> r0     * r1     * r2     * r3
    // we will need 2^num_var evaluations

    let mut eval = Vec::new();
    build_eq_x_r_helper(r, &mut eval)?;

    Ok(eval)
}

/// A helper function to build eq(x, r) recursively.
/// This function takes `r.len()` steps, and for each step it requires a maximum of
/// `r.len()-1` multiplications.
fn build_eq_x_r_helper<F: PrimeField>(r: &[F], buf: &mut Vec<F>) -> Result<(), ArithErrors> {
    if r.is_empty() {
        return Err(ArithErrors::InvalidParameters("r length is 0".to_string()));
    } else if r.len() == 1 {
        // initializing the buffer with [1-r_0, r_0]
        buf.push(F::one() - r[0]);
        buf.push(r[0]);
    } else {
        build_eq_x_r_helper(&r[1..], buf)?;

        // suppose at the previous step we received [b_1, ..., b_k]
        // for the current step we will need
        //   if x_0 = 0: (1-r0) * [b_1, ..., b_k]
        //   if x_0 = 1: r0 * [b_1, ..., b_k]
        // let mut res = vec![];
        // for &b_i in buf.iter() {
        //     let tmp = r[0] * b_i;
        //     res.push(b_i - tmp);
        //     res.push(tmp);
        // }
        // *buf = res;

        let mut res = vec![F::zero(); buf.len() << 1];
        res.par_iter_mut().enumerate().for_each(|(i, val)| {
            let bi = buf[i >> 1];
            let tmp = r[0] * bi;
            if i & 1 == 0 {
                *val = bi - tmp;
            } else {
                *val = tmp;
            }
        });
        *buf = res;
    }

    Ok(())
}

/// Decompose an integer into a binary vector in little endian.
pub fn bit_decompose(input: u64, num_var: usize) -> Vec<bool> {
    let mut res = Vec::with_capacity(num_var);
    let mut i = input;
    for _ in 0..num_var {
        res.push(i & 1 == 1);
        i >>= 1;
    }
    res
}
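
// Illustrative sketch (not part of this commit): bit_decompose is little-endian, and
// build_eq_x_r_vec lays out the 2^n values of eq(x, r) in that same index order.
// Assumes `ark_pallas::Fr`; r = (2, 3) and the module name are arbitrary choices.
#[cfg(test)]
mod eq_table_example {
    use super::*;
    use ark_pallas::Fr;

    #[test]
    fn sketch_eq_table() {
        // 6 = 0b110 -> little-endian bits [false, true, true]
        assert_eq!(bit_decompose(6, 3), vec![false, true, true]);

        // for r = (r0, r1), the table is indexed by x = 00, 10, 01, 11
        let (r0, r1) = (Fr::from(2u64), Fr::from(3u64));
        let table = build_eq_x_r_vec(&[r0, r1]).unwrap();
        let one = Fr::from(1u64);
        assert_eq!(
            table,
            vec![
                (one - r0) * (one - r1), // x = (0, 0)
                r0 * (one - r1),         // x = (1, 0)
                (one - r0) * r1,         // x = (0, 1)
                r0 * r1,                 // x = (1, 1)
            ]
        );
    }
}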

#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::multilinear_polynomial::tests::random_mle_list;
    use ark_ff::UniformRand;
    use ark_pallas::Fr;
    use ark_std::{
        rand::{Rng, RngCore},
        test_rng,
    };

    impl<F: PrimeField> VirtualPolynomial<F> {
        /// Sample a random virtual polynomial, return the polynomial and its sum.
        fn rand<R: RngCore>(
            nv: usize,
            num_multiplicands_range: (usize, usize),
            num_products: usize,
            rng: &mut R,
        ) -> Result<(Self, F), ArithErrors> {
            let start = start_timer!(|| "sample random virtual polynomial");

            let mut sum = F::zero();
            let mut poly = VirtualPolynomial::new(nv);
            for _ in 0..num_products {
                let num_multiplicands =
                    rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1);
                let (product, product_sum) = random_mle_list(nv, num_multiplicands, rng);
                let coefficient = F::rand(rng);
                poly.add_mle_list(product.into_iter(), coefficient)?;
                sum += product_sum * coefficient;
            }

            end_timer!(start);
            Ok((poly, sum))
        }
    }

    #[test]
    fn test_virtual_polynomial_additions() -> Result<(), ArithErrors> {
        let mut rng = test_rng();
        for nv in 2..5 {
            for num_products in 2..5 {
                let base: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();

                let (a, _a_sum) =
                    VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
                let (b, _b_sum) =
                    VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
                let c = &a + &b;

                assert_eq!(
                    a.evaluate(base.as_ref())? + b.evaluate(base.as_ref())?,
                    c.evaluate(base.as_ref())?
                );
            }
        }

        Ok(())
    }

    #[test]
    fn test_virtual_polynomial_mul_by_mle() -> Result<(), ArithErrors> {
        let mut rng = test_rng();
        for nv in 2..5 {
            for num_products in 2..5 {
                let base: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();

                let (a, _a_sum) =
                    VirtualPolynomial::<Fr>::rand(nv, (2, 3), num_products, &mut rng)?;
                let (b, _b_sum) = random_mle_list(nv, 1, &mut rng);
                let b_mle = b[0].clone();
                let coeff = Fr::rand(&mut rng);
                let b_vp = VirtualPolynomial::new_from_mle(&b_mle, coeff);

                let mut c = a.clone();

                c.mul_by_mle(b_mle, coeff)?;

                assert_eq!(
                    a.evaluate(base.as_ref())? * b_vp.evaluate(base.as_ref())?,
                    c.evaluate(base.as_ref())?
                );
            }
        }

        Ok(())
    }

    #[test]
    fn test_eq_xr() {
        let mut rng = test_rng();
        for nv in 4..10 {
            let r: Vec<Fr> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();
            let eq_x_r = build_eq_x_r(r.as_ref()).unwrap();
            let eq_x_r2 = build_eq_x_r_for_test(r.as_ref());
            assert_eq!(eq_x_r, eq_x_r2);
        }
    }

    /// Naive method to build eq(x, r).
    /// Only used for testing purposes.
    // Evaluate
    //      eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i))
    // over r, which is
    //      eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i))
    fn build_eq_x_r_for_test<F: PrimeField>(r: &[F]) -> Arc<DenseMultilinearExtension<F>> {
        // we build eq(x,r) from its evaluations
        // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars
        // for example, with num_vars = 4, x is a binary vector of 4, then
        //  0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3)
        //  1 0 0 0 -> r0     * (1-r1) * (1-r2) * (1-r3)
        //  0 1 0 0 -> (1-r0) * r1     * (1-r2) * (1-r3)
        //  1 1 0 0 -> r0     * r1     * (1-r2) * (1-r3)
        //  ....
        //  1 1 1 1 -> r0     * r1     * r2     * r3
        // we will need 2^num_var evaluations

        // First, we build array for {1 - r_i}
        let one_minus_r: Vec<F> = r.iter().map(|ri| F::one() - ri).collect();

        let num_var = r.len();
        let mut eval = vec![];

        for i in 0..1 << num_var {
            let mut current_eval = F::one();
            let bit_sequence = bit_decompose(i, num_var);

            for (&bit, (ri, one_minus_ri)) in
                bit_sequence.iter().zip(r.iter().zip(one_minus_r.iter()))
            {
                current_eval *= if bit { *ri } else { *one_minus_ri };
            }
            eval.push(current_eval);
        }

        let mle = DenseMultilinearExtension::from_evaluations_vec(num_var, eval);

        Arc::new(mle)
    }
}
folding-schemes/src/utils/gadgets.rs (new file, 105 lines)
@@ -0,0 +1,105 @@
use ark_ff::PrimeField;
use ark_r1cs_std::{
    alloc::{AllocVar, AllocationMode},
    fields::FieldVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};

use crate::utils::vec::SparseMatrix;

pub fn mat_vec_mul_sparse<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
    m: SparseMatrixVar<F, CF, FV>,
    v: Vec<FV>,
) -> Vec<FV> {
    let mut res = vec![FV::zero(); m.n_rows];
    for (row_i, row) in m.coeffs.iter().enumerate() {
        for (value, col_i) in row.iter() {
            res[row_i] += value.clone().mul(&v[*col_i].clone());
        }
    }
    res
}
pub fn vec_add<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
    a: &Vec<FV>,
    b: &Vec<FV>,
) -> Result<Vec<FV>, SynthesisError> {
    if a.len() != b.len() {
        return Err(SynthesisError::Unsatisfiable);
    }
    let mut r: Vec<FV> = vec![FV::zero(); a.len()];
    for i in 0..a.len() {
        r[i] = a[i].clone() + b[i].clone();
    }
    Ok(r)
}
pub fn vec_scalar_mul<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
    vec: &Vec<FV>,
    c: &FV,
) -> Vec<FV> {
    let mut result = vec![FV::zero(); vec.len()];
    for (i, a) in vec.iter().enumerate() {
        result[i] = a.clone() * c;
    }
    result
}
pub fn hadamard<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>>(
    a: &Vec<FV>,
    b: &Vec<FV>,
) -> Result<Vec<FV>, SynthesisError> {
    if a.len() != b.len() {
        return Err(SynthesisError::Unsatisfiable);
    }
    let mut r: Vec<FV> = vec![FV::zero(); a.len()];
    for i in 0..a.len() {
        r[i] = a[i].clone() * b[i].clone();
    }
    Ok(r)
}

#[derive(Debug, Clone)]
pub struct SparseMatrixVar<F: PrimeField, CF: PrimeField, FV: FieldVar<F, CF>> {
    _f: PhantomData<F>,
    _cf: PhantomData<CF>,
    _fv: PhantomData<FV>,
    pub n_rows: usize,
    pub n_cols: usize,
    // same format as the native SparseMatrix (which follows the ark_relations::r1cs::Matrix format)
    pub coeffs: Vec<Vec<(FV, usize)>>,
}

impl<F, CF, FV> AllocVar<SparseMatrix<F>, CF> for SparseMatrixVar<F, CF, FV>
where
    F: PrimeField,
    CF: PrimeField,
    FV: FieldVar<F, CF>,
{
    fn new_variable<T: Borrow<SparseMatrix<F>>>(
        cs: impl Into<Namespace<CF>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|val| {
            let cs = cs.into();

            let mut coeffs: Vec<Vec<(FV, usize)>> = Vec::new();
            for row in val.borrow().coeffs.iter() {
                let mut rowVar: Vec<(FV, usize)> = Vec::new();
                for &(value, col_i) in row.iter() {
                    let coeffVar = FV::new_variable(cs.clone(), || Ok(value), mode)?;
                    rowVar.push((coeffVar, col_i));
                }
                coeffs.push(rowVar);
            }

            Ok(Self {
                _f: PhantomData,
                _cf: PhantomData,
                _fv: PhantomData,
                n_rows: val.borrow().n_rows,
                n_cols: val.borrow().n_cols,
                coeffs,
            })
        })
    }
}
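
// Illustrative sketch (not part of this commit): allocating a `SparseMatrixVar` over
// `FpVar<Fr>` and multiplying it by a witness vector in-circuit with the gadget above.
// The matrix, the vector and the module name are made up for the example; it reuses the
// `to_F_matrix` / `to_F_vec` test helpers from `utils::vec`.
#[cfg(test)]
mod example {
    use super::*;
    use crate::utils::vec::tests::{to_F_matrix, to_F_vec};
    use ark_pallas::Fr;
    use ark_r1cs_std::{fields::fp::FpVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;

    #[test]
    fn sketch_mat_vec_mul_sparse_gadget() {
        let cs = ConstraintSystem::<Fr>::new_ref();

        // M = [[1, 2], [3, 4]], v = [5, 6], so M * v = [17, 39]
        let m = to_F_matrix::<Fr>(vec![vec![1, 2], vec![3, 4]]);
        let v = to_F_vec::<Fr>(vec![5, 6]);

        // allocate both as witnesses in the constraint system
        let m_var =
            SparseMatrixVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(m)).unwrap();
        let v_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(v)).unwrap();

        let r_var = mat_vec_mul_sparse(m_var, v_var);
        assert_eq!(r_var[0].value().unwrap(), Fr::from(17u64));
        assert_eq!(r_var[1].value().unwrap(), Fr::from(39u64));
        assert!(cs.is_satisfied().unwrap());
    }
}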

folding-schemes/src/utils/hypercube.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
/// A boolean hypercube structure to create an ergonomic evaluation domain
use crate::utils::virtual_polynomial::bit_decompose;
use ark_ff::PrimeField;

use std::marker::PhantomData;

/// A boolean hypercube that returns its points as an iterator
/// If you iterate on it for 3 variables you will get points in little-endian order:
/// 000 -> 100 -> 010 -> 110 -> 001 -> 101 -> 011 -> 111
#[derive(Debug, Clone)]
pub struct BooleanHypercube<F: PrimeField> {
    _f: PhantomData<F>,
    n_vars: usize,
    current: u64,
    max: u64,
}

impl<F: PrimeField> BooleanHypercube<F> {
    pub fn new(n_vars: usize) -> Self {
        BooleanHypercube::<F> {
            _f: PhantomData::<F>,
            n_vars,
            current: 0,
            max: 2_u32.pow(n_vars as u32) as u64,
        }
    }

    /// returns the entry at given i (which is the little-endian bit representation of i)
    pub fn at_i(&self, i: usize) -> Vec<F> {
        assert!(i < self.max as usize);
        let bits = bit_decompose(i as u64, self.n_vars);
        bits.iter().map(|&x| F::from(x)).collect()
    }
}

impl<F: PrimeField> Iterator for BooleanHypercube<F> {
    type Item = Vec<F>;

    fn next(&mut self) -> Option<Self::Item> {
        let bits = bit_decompose(self.current, self.n_vars);
        let result: Vec<F> = bits.iter().map(|&x| F::from(x)).collect();
        self.current += 1;

        if self.current > self.max {
            return None;
        }

        Some(result)
    }
}
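
// Illustrative sketch (not part of this commit): using the hypercube iterator to sum a
// multilinear polynomial over {0,1}^n, the quantity a sum-check prover commits to.
// Assumes `ark_pallas::Fr` and arkworks' `DenseMultilinearExtension`; the evaluations
// and the module name are made up for the example.
#[cfg(test)]
mod example {
    use super::*;
    use ark_pallas::Fr;
    use ark_poly::{DenseMultilinearExtension, MultilinearExtension};

    #[test]
    fn sketch_sum_over_hypercube() {
        // f(x0, x1) with evaluations [1, 2, 3, 4] over the points 00, 10, 01, 11
        let evals: Vec<Fr> = vec![1u64, 2, 3, 4].into_iter().map(Fr::from).collect();
        let f = DenseMultilinearExtension::from_evaluations_vec(2, evals);

        // \sum_{x in {0,1}^2} f(x) = 1 + 2 + 3 + 4 = 10
        let sum: Fr = BooleanHypercube::<Fr>::new(2)
            .map(|x| f.evaluate(&x).unwrap())
            .sum();
        assert_eq!(sum, Fr::from(10u64));
    }
}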

#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::vec::tests::to_F_dense_matrix;
    use ark_pallas::Fr;

    #[test]
    fn test_hypercube() {
        let expected_results = to_F_dense_matrix(vec![
            vec![0, 0, 0],
            vec![1, 0, 0],
            vec![0, 1, 0],
            vec![1, 1, 0],
            vec![0, 0, 1],
            vec![1, 0, 1],
            vec![0, 1, 1],
            vec![1, 1, 1],
        ]);

        let bhc = BooleanHypercube::<Fr>::new(3);
        for (i, point) in bhc.clone().enumerate() {
            assert_eq!(point, expected_results[i]);
            assert_eq!(point, bhc.at_i(i));
        }
    }
}

folding-schemes/src/utils/lagrange_poly.rs (new file, 123 lines)
@@ -0,0 +1,123 @@
use ark_ff::PrimeField;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};

/// Computes the lagrange interpolated polynomial from the given points `p_i`
pub fn compute_lagrange_interpolated_poly<F: PrimeField>(p_i: &[F]) -> DensePolynomial<F> {
    // domain is 0..p_i.len(), to fit `interpolate_uni_poly` from hyperplonk
    let domain: Vec<usize> = (0..p_i.len()).collect();

    // compute l(x), common to every basis polynomial
    let mut l_x = DensePolynomial::from_coefficients_vec(vec![F::ONE]);
    for x_m in domain.clone() {
        let prod_m = DensePolynomial::from_coefficients_vec(vec![-F::from(x_m as u64), F::ONE]);
        l_x = &l_x * &prod_m;
    }

    // compute each w_j - barycentric weights
    let mut w_j_vector: Vec<F> = vec![];
    for x_j in domain.clone() {
        let mut w_j = F::ONE;
        for x_m in domain.clone() {
            if x_m != x_j {
                let prod = (F::from(x_j as u64) - F::from(x_m as u64))
                    .inverse()
                    .unwrap(); // an inverse always exists since x_j != x_m (!=0)
                               // hence, we call unwrap() here without checking the Option's content
                w_j *= prod;
            }
        }
        w_j_vector.push(w_j);
    }

    // compute each polynomial within the sum L(x)
    let mut lagrange_poly = DensePolynomial::from_coefficients_vec(vec![F::ZERO]);
    for (j, w_j) in w_j_vector.iter().enumerate() {
        let x_j = domain[j];
        let y_j = p_i[j];
        // we multiply by l(x) here, otherwise the below division will not work - deg(0)/deg(d)
        let poly_numerator = &(&l_x * (*w_j)) * (y_j);
        let poly_denominator =
            DensePolynomial::from_coefficients_vec(vec![-F::from(x_j as u64), F::ONE]);
        let poly = &poly_numerator / &poly_denominator;
        lagrange_poly = &lagrange_poly + &poly;
    }

    lagrange_poly
}
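
// Illustrative sketch (not part of this commit): interpolating the three points
// (0,0), (1,1), (2,4), i.e. the evaluations of x^2 over the domain 0..3, and then
// evaluating the interpolant elsewhere. Assumes `ark_pallas::Fr`; the values and
// the module name are made up for the example.
#[cfg(test)]
mod example {
    use super::*;
    use ark_pallas::Fr;
    use ark_poly::Polynomial;

    #[test]
    fn sketch_interpolation() {
        // p_i[i] is interpreted as the evaluation at x = i
        let evals = vec![Fr::from(0u64), Fr::from(1u64), Fr::from(4u64)];
        let poly = compute_lagrange_interpolated_poly(&evals);
        assert_eq!(poly.degree(), 2);
        assert_eq!(poly.evaluate(&Fr::from(5u64)), Fr::from(25u64)); // 5^2
    }
}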

#[cfg(test)]
mod tests {

    use crate::utils::espresso::sum_check::verifier::interpolate_uni_poly;
    use crate::utils::lagrange_poly::compute_lagrange_interpolated_poly;
    use ark_pallas::Fr;
    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
    use ark_std::{vec::Vec, UniformRand};
    use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;

    #[test]
    fn test_compute_lagrange_interpolated_poly() {
        let mut prng = ark_std::test_rng();
        for degree in 1..30 {
            let poly = DensePolynomial::<Fr>::rand(degree, &mut prng);
            // range (which is exclusive) is from 0 to degree + 1, since we need degree + 1 evaluations
            let evals = (0..(degree + 1))
                .map(|i| poly.evaluate(&Fr::from(i as u64)))
                .collect::<Vec<Fr>>();
            let lagrange_poly = compute_lagrange_interpolated_poly(&evals);
            for _ in 0..10 {
                let query = Fr::rand(&mut prng);
                let lagrange_eval = lagrange_poly.evaluate(&query);
                let eval = poly.evaluate(&query);
                assert_eq!(eval, lagrange_eval);
                assert_eq!(lagrange_poly.degree(), poly.degree());
            }
        }
    }

    #[test]
    fn test_interpolation() -> Result<(), PolyIOPErrors> {
        let mut prng = ark_std::test_rng();

        // test a polynomial with 20 known points, i.e., with degree 19
        let poly = DensePolynomial::<Fr>::rand(20 - 1, &mut prng);
        let evals = (0..20)
            .map(|i| poly.evaluate(&Fr::from(i)))
            .collect::<Vec<Fr>>();
        let query = Fr::rand(&mut prng);

        assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
        assert_eq!(
            compute_lagrange_interpolated_poly(&evals).evaluate(&query),
            interpolate_uni_poly(&evals, query)?
        );

        // test a polynomial with 33 known points, i.e., with degree 32
        let poly = DensePolynomial::<Fr>::rand(33 - 1, &mut prng);
        let evals = (0..33)
            .map(|i| poly.evaluate(&Fr::from(i)))
            .collect::<Vec<Fr>>();
        let query = Fr::rand(&mut prng);

        assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
        assert_eq!(
            compute_lagrange_interpolated_poly(&evals).evaluate(&query),
            interpolate_uni_poly(&evals, query)?
        );

        // test a polynomial with 64 known points, i.e., with degree 63
        let poly = DensePolynomial::<Fr>::rand(64 - 1, &mut prng);
        let evals = (0..64)
            .map(|i| poly.evaluate(&Fr::from(i)))
            .collect::<Vec<Fr>>();
        let query = Fr::rand(&mut prng);

        assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?);
        assert_eq!(
            compute_lagrange_interpolated_poly(&evals).evaluate(&query),
            interpolate_uni_poly(&evals, query)?
        );

        Ok(())
    }
}

folding-schemes/src/utils/mle.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
/// Some basic MLE utilities
use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension;
use ark_std::log2;

use super::vec::SparseMatrix;

/// Pad matrix so that its columns and rows are powers of two
pub fn pad_matrix<F: PrimeField>(m: &SparseMatrix<F>) -> SparseMatrix<F> {
    let mut r = m.clone();
    r.n_rows = m.n_rows.next_power_of_two();
    r.n_cols = m.n_cols.next_power_of_two();
    r
}

/// Returns the dense multilinear extension from the given matrix, without modifying the original
/// matrix.
pub fn matrix_to_mle<F: PrimeField>(matrix: SparseMatrix<F>) -> DenseMultilinearExtension<F> {
    let n_vars: usize = (log2(matrix.n_rows) + log2(matrix.n_cols)) as usize; // n_vars = s + s'

    // Matrices might need to get padded before turned into an MLE
    let padded_matrix = pad_matrix(&matrix);

    // build dense vector representing the sparse padded matrix
    let mut v: Vec<F> = vec![F::zero(); padded_matrix.n_rows * padded_matrix.n_cols];
    for (row_i, row) in padded_matrix.coeffs.iter().enumerate() {
        for &(value, col_i) in row.iter() {
            v[(padded_matrix.n_cols * row_i) + col_i] = value;
        }
    }

    // convert the dense vector into a mle
    vec_to_mle(n_vars, &v)
}

/// Takes the n_vars and a dense vector and returns its dense MLE.
pub fn vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
    let v_padded: Vec<F> = if v.len() != (1 << n_vars) {
        // pad to 2^n_vars
        [
            v.clone(),
            std::iter::repeat(F::zero())
                .take((1 << n_vars) - v.len())
                .collect(),
        ]
        .concat()
    } else {
        v.clone()
    };
    DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}

pub fn dense_vec_to_mle<F: PrimeField>(n_vars: usize, v: &Vec<F>) -> DenseMultilinearExtension<F> {
    dbg!(n_vars);
    dbg!(v.len());
    // Pad to 2^n_vars
    let v_padded: Vec<F> = [
        v.clone(),
        std::iter::repeat(F::zero())
            .take((1 << n_vars) - v.len())
            .collect(),
    ]
    .concat();
    DenseMultilinearExtension::<F>::from_evaluations_vec(n_vars, v_padded)
}
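
// Illustrative sketch (not part of this commit): the MLE of a vector agrees with the
// vector on the boolean hypercube (in little-endian index order) and pads the tail with
// zeros. Assumes `ark_pallas::Fr`; the vector and the module name are made up for the
// example.
#[cfg(test)]
mod example {
    use super::*;
    use crate::utils::hypercube::BooleanHypercube;
    use ark_pallas::Fr;
    use ark_poly::MultilinearExtension;

    #[test]
    fn sketch_dense_vec_to_mle() {
        // 3 entries padded up to 2^2 = 4 evaluations
        let v = vec![Fr::from(8u64), Fr::from(0u64), Fr::from(42u64)];
        let v_mle = dense_vec_to_mle(2, &v);

        let bhc = BooleanHypercube::<Fr>::new(2);
        assert_eq!(v_mle.evaluate(&bhc.at_i(2)).unwrap(), Fr::from(42u64)); // v[2]
        assert_eq!(v_mle.evaluate(&bhc.at_i(3)).unwrap(), Fr::from(0u64)); // padding
    }
}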

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        ccs::tests::get_test_z,
        utils::multilinear_polynomial::fix_variables,
        utils::multilinear_polynomial::tests::fix_last_variables,
        utils::{hypercube::BooleanHypercube, vec::tests::to_F_matrix},
    };
    use ark_poly::MultilinearExtension;
    use ark_std::Zero;

    use ark_pallas::Fr;

    #[test]
    fn test_matrix_to_mle() {
        let A = to_F_matrix::<Fr>(vec![
            vec![2, 3, 4, 4],
            vec![4, 11, 14, 14],
            vec![2, 8, 17, 17],
            vec![420, 4, 2, 0],
        ]);

        let A_mle = matrix_to_mle(A);
        dbg!(&A_mle);
        assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals

        let A = to_F_matrix::<Fr>(vec![
            vec![2, 3, 4, 4, 1],
            vec![4, 11, 14, 14, 2],
            vec![2, 8, 17, 17, 3],
            vec![420, 4, 2, 0, 4],
            vec![420, 4, 2, 0, 5],
        ]);
        let A_mle = matrix_to_mle(A.clone());
        assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals

        // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values
        let bhc = BooleanHypercube::new(A_mle.num_vars);
        let A_padded = pad_matrix(&A);
        let A_padded_dense = A_padded.to_dense();
        for (i, A_row) in A_padded_dense.iter().enumerate() {
            for (j, _) in A_row.iter().enumerate() {
                let s_i_j = bhc.at_i(i * A_row.len() + j);
                assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded_dense[i][j]);
            }
        }
    }

    #[test]
    fn test_vec_to_mle() {
        let z = get_test_z::<Fr>(3);
        let n_vars = 3;
        let z_mle = dense_vec_to_mle(n_vars, &z);

        // check that the z_mle evaluated over the boolean hypercube equals the vec z_i values
        let bhc = BooleanHypercube::new(z_mle.num_vars);
        for (i, z_i) in z.iter().enumerate() {
            let s_i = bhc.at_i(i);
            assert_eq!(z_mle.evaluate(&s_i).unwrap(), z_i.clone());
        }
        // for the rest of elements of the boolean hypercube, expect it to evaluate to zero
        for i in (z.len())..(1 << z_mle.num_vars) {
            let s_i = bhc.at_i(i);
            assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero());
        }
    }

    #[test]
    fn test_fix_variables() {
        let A = to_F_matrix(vec![
            vec![2, 3, 4, 4],
            vec![4, 11, 14, 14],
            vec![2, 8, 17, 17],
            vec![420, 4, 2, 0],
        ]);

        let A_mle = matrix_to_mle(A.clone());
        let A = A.to_dense();
        let bhc = BooleanHypercube::new(2);
        for (i, y) in bhc.enumerate() {
            // First check that the arkworks and espresso funcs match
            let expected_fix_left = A_mle.fix_variables(&y); // try arkworks fix_variables
            let fix_left = fix_variables(&A_mle, &y); // try espresso fix_variables
            assert_eq!(fix_left, expected_fix_left);

            // Check that fixing first variables pins down a column
            // i.e. fixing x to 0 will return the first column
            // fixing x to 1 will return the second column etc.
            let column_i: Vec<Fr> = A.clone().iter().map(|x| x[i]).collect();
            assert_eq!(fix_left.evaluations, column_i);

            // Now check that fixing last variables pins down a row
            // i.e. fixing y to 0 will return the first row
            // fixing y to 1 will return the second row etc.
            let row_i: Vec<Fr> = A[i].clone();
            let fix_right = fix_last_variables(&A_mle, &y);
            assert_eq!(fix_right.evaluations, row_i);
        }
    }
}

folding-schemes/src/utils/mod.rs (new file, 12 lines)
@@ -0,0 +1,12 @@
pub mod bit;
pub mod gadgets;
pub mod hypercube;
pub mod lagrange_poly;
pub mod mle;
pub mod vec;

// expose espresso local modules
pub mod espresso;
pub use crate::utils::espresso::multilinear_polynomial;
pub use crate::utils::espresso::sum_check;
pub use crate::utils::espresso::virtual_polynomial;
folding-schemes/src/utils/vec.rs (new file, 209 lines)
@@ -0,0 +1,209 @@
use ark_ff::PrimeField;
pub use ark_relations::r1cs::Matrix as R1CSMatrix;
use ark_std::cfg_iter;
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};

use crate::Error;

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SparseMatrix<F: PrimeField> {
    pub n_rows: usize,
    pub n_cols: usize,
    /// coeffs = R1CSMatrix = Vec<Vec<(F, usize)>>; each inner Vec is a row, where the F is the
    /// coefficient value and the usize is the column index
    pub coeffs: R1CSMatrix<F>,
}

impl<F: PrimeField> SparseMatrix<F> {
    pub fn to_dense(&self) -> Vec<Vec<F>> {
        let mut r: Vec<Vec<F>> = vec![vec![F::zero(); self.n_cols]; self.n_rows];
        for (row_i, row) in self.coeffs.iter().enumerate() {
            for &(value, col_i) in row.iter() {
                r[row_i][col_i] = value;
            }
        }
        r
    }
}

pub fn dense_matrix_to_sparse<F: PrimeField>(m: Vec<Vec<F>>) -> SparseMatrix<F> {
    let mut r = SparseMatrix::<F> {
        n_rows: m.len(),
        n_cols: m[0].len(),
        coeffs: Vec::new(),
    };
    for m_row in m.iter() {
        let mut row: Vec<(F, usize)> = Vec::new();
        for (col_i, value) in m_row.iter().enumerate() {
            if !value.is_zero() {
                row.push((*value, col_i));
            }
        }
        r.coeffs.push(row);
    }
    r
}

pub fn vec_add<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
    if a.len() != b.len() {
        return Err(Error::NotSameLength(
            "a.len()".to_string(),
            a.len(),
            "b.len()".to_string(),
            b.len(),
        ));
    }
    Ok(a.iter().zip(b.iter()).map(|(x, y)| *x + y).collect())
}

pub fn vec_sub<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
    if a.len() != b.len() {
        return Err(Error::NotSameLength(
            "a.len()".to_string(),
            a.len(),
            "b.len()".to_string(),
            b.len(),
        ));
    }
    Ok(a.iter().zip(b.iter()).map(|(x, y)| *x - y).collect())
}

pub fn vec_scalar_mul<F: PrimeField>(vec: &[F], c: &F) -> Vec<F> {
    vec.iter().map(|a| *a * c).collect()
}

pub fn is_zero_vec<F: PrimeField>(vec: &[F]) -> bool {
    vec.iter().all(|a| a.is_zero())
}

pub fn mat_vec_mul<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Result<Vec<F>, Error> {
    if M.is_empty() {
        return Err(Error::Empty);
    }
    if M[0].len() != z.len() {
        return Err(Error::NotSameLength(
            "M[0].len()".to_string(),
            M[0].len(),
            "z.len()".to_string(),
            z.len(),
        ));
    }

    let mut r: Vec<F> = vec![F::zero(); M.len()];
    for (i, M_i) in M.iter().enumerate() {
        for (j, M_ij) in M_i.iter().enumerate() {
            r[i] += *M_ij * z[j];
        }
    }
    Ok(r)
}

pub fn mat_vec_mul_sparse<F: PrimeField>(M: &SparseMatrix<F>, z: &[F]) -> Result<Vec<F>, Error> {
    if M.n_cols != z.len() {
        return Err(Error::NotSameLength(
            "M.n_cols".to_string(),
            M.n_cols,
            "z.len()".to_string(),
            z.len(),
        ));
    }
    let mut res = vec![F::zero(); M.n_rows];
    for (row_i, row) in M.coeffs.iter().enumerate() {
        for &(value, col_i) in row.iter() {
            res[row_i] += value * z[col_i];
        }
    }
    Ok(res)
}

pub fn hadamard<F: PrimeField>(a: &[F], b: &[F]) -> Result<Vec<F>, Error> {
    if a.len() != b.len() {
        return Err(Error::NotSameLength(
            "a.len()".to_string(),
            a.len(),
            "b.len()".to_string(),
            b.len(),
        ));
    }
    Ok(cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect())
}
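
// Illustrative sketch (not part of this commit): checking a single R1CS constraint
// x * x = y with the helpers above, i.e. that (A z) hadamard (B z) minus C z is the zero
// vector. The matrices and the assignment z = (1, x, y) = (1, 3, 9) are made up for the
// example.
#[cfg(test)]
mod example {
    use super::*;
    use ark_pallas::Fr;

    #[test]
    fn sketch_r1cs_check() {
        let a = dense_matrix_to_sparse(vec![vec![Fr::from(0u64), Fr::from(1u64), Fr::from(0u64)]]);
        let b = a.clone();
        let c = dense_matrix_to_sparse(vec![vec![Fr::from(0u64), Fr::from(0u64), Fr::from(1u64)]]);
        let z = vec![Fr::from(1u64), Fr::from(3u64), Fr::from(9u64)];

        let az = mat_vec_mul_sparse(&a, &z).unwrap(); // A z = [3]
        let bz = mat_vec_mul_sparse(&b, &z).unwrap(); // B z = [3]
        let cz = mat_vec_mul_sparse(&c, &z).unwrap(); // C z = [9]
        assert!(is_zero_vec(&vec_sub(&hadamard(&az, &bz).unwrap(), &cz).unwrap()));
    }
}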

#[cfg(test)]
pub mod tests {
    use super::*;
    use ark_pallas::Fr;

    pub fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> SparseMatrix<F> {
        dense_matrix_to_sparse(to_F_dense_matrix(M))
    }
    pub fn to_F_dense_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {
        M.iter()
            .map(|m| m.iter().map(|r| F::from(*r as u64)).collect())
            .collect()
    }
    pub fn to_F_vec<F: PrimeField>(z: Vec<usize>) -> Vec<F> {
        z.iter().map(|c| F::from(*c as u64)).collect()
    }

    #[test]
    fn test_dense_sparse_conversions() {
        let A = to_F_dense_matrix::<Fr>(vec![
            vec![0, 1, 0, 0, 0, 0],
            vec![0, 0, 0, 1, 0, 0],
            vec![0, 1, 0, 0, 1, 0],
            vec![5, 0, 0, 0, 0, 1],
        ]);
        let A_sparse = dense_matrix_to_sparse(A.clone());
        assert_eq!(A_sparse.to_dense(), A);
    }

    // test mat_vec_mul & mat_vec_mul_sparse
    #[test]
    fn test_mat_vec_mul() {
        let A = to_F_matrix::<Fr>(vec![
            vec![0, 1, 0, 0, 0, 0],
            vec![0, 0, 0, 1, 0, 0],
            vec![0, 1, 0, 0, 1, 0],
            vec![5, 0, 0, 0, 0, 1],
        ])
        .to_dense();
        let z = to_F_vec(vec![1, 3, 35, 9, 27, 30]);
        assert_eq!(mat_vec_mul(&A, &z).unwrap(), to_F_vec(vec![3, 9, 30, 35]));
        assert_eq!(
            mat_vec_mul_sparse(&dense_matrix_to_sparse(A), &z).unwrap(),
            to_F_vec(vec![3, 9, 30, 35])
        );

        let A = to_F_matrix::<Fr>(vec![vec![2, 3, 4, 5], vec![4, 8, 12, 14], vec![9, 8, 7, 6]]);
        let v = to_F_vec(vec![19, 55, 50, 3]);

        assert_eq!(
            mat_vec_mul(&A.to_dense(), &v).unwrap(),
            to_F_vec(vec![418, 1158, 979])
        );
        assert_eq!(
            mat_vec_mul_sparse(&A, &v).unwrap(),
            to_F_vec(vec![418, 1158, 979])
        );
    }

    #[test]
    fn test_hadamard_product() {
        let a = to_F_vec::<Fr>(vec![1, 2, 3, 4, 5, 6]);
        let b = to_F_vec(vec![7, 8, 9, 10, 11, 12]);
        assert_eq!(
            hadamard(&a, &b).unwrap(),
            to_F_vec(vec![7, 16, 27, 40, 55, 72])
        );
    }

    #[test]
    fn test_vec_add() {
        let a: Vec<Fr> = to_F_vec::<Fr>(vec![1, 2, 3, 4, 5, 6]);
        let b: Vec<Fr> = to_F_vec(vec![7, 8, 9, 10, 11, 12]);
        assert_eq!(
            vec_add(&a, &b).unwrap(),
            to_F_vec(vec![8, 10, 12, 14, 16, 18])
        );
    }
}