mirror of https://github.com/arnaucube/protogalaxy-poc.git (synced 2026-01-11 16:31:31 +01:00)
implement K(X) & G(X) computation. Folding works
- implement G(X) & K(X) computation
- update e* and phi* usage of L_i(X)
- extend README.md

With this commit, prover & verifier folding works, and the outputted instances satisfy the relation check.
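For reference, a sketch of the folded output as computed in the prover & verifier code in the diff below, where L_i(X) are the Lagrange polynomials over the domain H of size k+1, Z(X) is its vanishing polynomial, and the running instance/witness is taken as index 0:

```latex
e^*    = F(\alpha)\cdot L_0(\gamma) + Z(\gamma)\cdot K(\gamma)
\phi^* = L_0(\gamma)\cdot \phi + \sum_{i=1}^{k} L_i(\gamma)\cdot \phi_i
w^*    = L_0(\gamma)\cdot w + \sum_{i=1}^{k} L_i(\gamma)\cdot w_i
```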
README.md (+73)
@@ -5,3 +5,76 @@ Proof of concept implementation of ProtoGalaxy (https://eprint.iacr.org/2023/110
> Do not use in production.

Thanks to [Liam Eagen](https://twitter.com/LiamEagen) and [Ariel Gabizon](https://twitter.com/rel_zeta_tech) for their kind explanations.

This code has been written in the context of the research on folding schemes in [0xPARC](https://0xparc.org).

(img: protogalaxies colliding, [from Wikipedia](https://en.wikipedia.org/wiki/File:Stellar_Fireworks_Finale.jpg))

## Details

Implementation of ProtoGalaxy's scheme described in section 4 of the paper.
Current version implements the folding for both prover & verifier, and it works, but it is not yet optimized.

Next steps in terms of implementation include: constructing F(X) in O(n) following Claim 4.4, computing K(X) in O(kd log(kd)M + ndkC) as described in Claim 4.5, adding tests that fold over multiple iterations and also in a tree approach, adding the decider, and integrating with existing R1CS tooling for the R1CS & witness generation.
### Usage

Example of folding k+1 instances:

```rust
// assume we have:
// - an R1CS instance 'r1cs'
// - a valid witness 'witness' from our running instance
// - k valid 'witnesses' to be folded

// set the initial random betas
let beta = Fr::rand(&mut rng);
let betas = powers_of_beta(beta, t);

// compute the committed instance for our running witness
let phi = Pedersen::<G1Projective>::commit(&pedersen_params, &witness.w, &witness.r_w);
let instance = CommittedInstance::<G1Projective> {
    phi,
    betas: betas.clone(),
    e: Fr::zero(),
};

// compute the k committed instances to be folded
let mut instances: Vec<CommittedInstance<G1Projective>> = Vec::new();
for i in 0..k {
    let phi_i =
        Pedersen::<G1Projective>::commit(&pedersen_params, &witnesses[i].w, &witnesses[i].r_w);
    let instance_i = CommittedInstance::<G1Projective> {
        phi: phi_i,
        betas: betas.clone(),
        e: Fr::zero(),
    };
    instances.push(instance_i);
}

// Prover folds the instances and witnesses
let (F_coeffs, K_coeffs, folded_instance, folded_witness) = Folding::<G1Projective>::prover(
    &mut transcript_p,
    &r1cs,
    instance.clone(),
    witness,
    instances.clone(),
    witnesses,
);

// Verifier folds the instances
let folded_instance_v = Folding::<G1Projective>::verifier(
    &mut transcript_v,
    &r1cs,
    instance,
    instances,
    F_coeffs,
    K_coeffs,
);

// check that the folded instance & witness satisfy the relation
assert!(check_instance(&r1cs, folded_instance, folded_witness));
```
(see the actual code for more details)
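As a note on what "satisfy the relation" means above: `check_instance` (see the diff further below) evaluates f(w) = (Aw)∘(Bw) − Cw via `eval_f` and accumulates a pow_i-weighted sum which is compared against the instance's error term e. A sketch of the checked relation, in the notation used in the code:

```latex
e \;=\; \sum_{i=0}^{n-1} \mathrm{pow}_i(\beta)\cdot f(w)_i,
\qquad f(w) = (A w)\circ(B w) - C w
```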
@@ -1,7 +1,7 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(unused)] // TMP
#![allow(dead_code)] // TMP
// #![allow(unused)] // TMP
// #![allow(dead_code)] // TMP

pub mod pedersen;
pub mod protogalaxy;

@@ -1,4 +1,4 @@
/// pedersen.rs file and adapted from https://github.com/arnaucube/nova-study
/// pedersen.rs file adapted from https://github.com/arnaucube/nova-study
use ark_ec::{CurveGroup, Group};
use ark_std::{
rand::{Rng, RngCore},

@@ -2,17 +2,16 @@ use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::fields::PrimeField;
use ark_std::log2;
use ark_std::{cfg_into_iter, One, Zero};
use ark_std::{cfg_into_iter, Zero};
use std::marker::PhantomData;
use std::ops::{Add, Mul};
use std::ops::Add;

use ark_ff::{batch_inversion, FftField};
use ark_poly::{
univariate::{DensePolynomial, SparsePolynomial},
DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
};

use crate::pedersen::{Commitment, Params as PedersenParams, Pedersen, Proof as PedersenProof};
use crate::pedersen::Commitment;
use crate::transcript::Transcript;
use crate::utils::*;

@@ -40,8 +39,7 @@ where
{
// WIP naming of functions
pub fn prover(
tr: &mut Transcript<C::ScalarField, C>,
pedersen_params: &PedersenParams<C>,
transcript: &mut Transcript<C::ScalarField, C>,
r1cs: &R1CS<C::ScalarField>,
// running instance
instance: CommittedInstance<C>,

@@ -57,13 +55,14 @@ where
) {
let t = instance.betas.len();
let n = r1cs.A[0].len();
assert_eq!(w.w.len(), n);
assert_eq!(log2(n) as usize, t);

// TODO initialize transcript
let delta = tr.get_challenge();
let delta = transcript.get_challenge();
let deltas = powers_of_beta(delta, t);

let f_w = eval_f(&r1cs, &w.w);
// println!("is f(w) {:?}", f_w);
let f_w = eval_f(r1cs, &w.w);

// F(X)
let mut F_X: SparsePolynomial<C::ScalarField> = SparsePolynomial::zero();

@@ -74,9 +73,9 @@ where
}

let F_X_dense = DensePolynomial::from(F_X.clone());
tr.add_vec(&F_X_dense.coeffs);
transcript.add_vec(&F_X_dense.coeffs);

let alpha = tr.get_challenge();
let alpha = transcript.get_challenge();

// eval F(alpha)
let F_alpha = F_X.evaluate(&alpha);
@@ -106,31 +105,78 @@ where
w.clone(),
));

let gamma = tr.get_challenge();
let gamma = transcript.get_challenge();

// TODO WIP compute G(X) & K(X)
let G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); n];
let mut ws: Vec<Vec<C::ScalarField>> = Vec::new();
ws.push(w.w.clone());
for wj in vec_w.iter() {
assert_eq!(wj.w.len(), n);
ws.push(wj.w.clone());
}

let k = vec_instances.len();
let H = GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).unwrap();
let EH = GeneralEvaluationDomain::<C::ScalarField>::new(t * k + 1).unwrap();
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);

// K(X) computation in a naive way, next iterations will compute K(X) as described in Claim
// 4.5 of the paper.
let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); EH.size()];
for (hi, h) in EH.elements().enumerate() {
// each iteration evaluates G(h)
// inner = L_0(x) * w + \sum_k L_i(x) * w_j
let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
for (i, w) in ws.iter().enumerate() {
// Li_w = Li(X) * wj
let mut Li_w: Vec<DensePolynomial<C::ScalarField>> =
vec![DensePolynomial::<C::ScalarField>::zero(); w.len()];
for (j, wj) in w.iter().enumerate() {
let Li_wj = &L_X[i] * *wj;
Li_w[j] = Li_wj;
}
// Li_w_h = Li_w(h) = Li(h) * wj
let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
for (j, _) in Li_w.iter().enumerate() {
Liw_h[j] = Li_w[j].evaluate(&h);
}

for j in 0..inner.len() {
inner[j] += Liw_h[j];
}
}
let f_ev = eval_f(r1cs, &inner);

let mut Gsum = C::ScalarField::zero();
for i in 0..n {
let pow_i_betas = pow_i(i, &betas_star);
let curr = pow_i_betas * f_ev[i];
Gsum += curr;
}
// G_evals[hi] = Gsum / Z_X.evaluate(&h); // WIP
G_evals[hi] = Gsum;
}
let G_X: DensePolynomial<C::ScalarField> =
Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals.clone(), H).interpolate();
// dbg!(&G_X);
let (K_X, remainder) = G_X.divide_by_vanishing_poly(H).unwrap();
// dbg!(&K_X);
assert!(remainder.is_zero());

Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals.clone(), EH).interpolate();
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
// K(X) = (G(X)- F(alpha)*L_0(X)) / Z(X)
let L0_e = &L_X[0] * F_alpha; // L0(X)*F(a) will be 0 in the native case
let G_L0e = &G_X - &L0_e;
// TODO move division by Z_X to the prev loop
let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H).unwrap();
assert!(remainder.is_zero());

let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);

let mut phi_star: C = instance.phi.0 * L_X[0].evaluate(&gamma);
for i in 0..k {
phi_star += vec_instances[i].phi.0 * L_X[i].evaluate(&gamma);
phi_star += vec_instances[i].phi.0 * L_X[i + 1].evaluate(&gamma);
}
let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
for i in 0..k {
w_star = vec_add(
&w_star,
&vec_scalar_mul(&vec_w[i].w, &L_X[i].evaluate(&gamma)),
&vec_scalar_mul(&vec_w[i].w, &L_X[i + 1].evaluate(&gamma)),
);
}
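Written out, what the naive loop above computes is the following (a sketch based on the comments in the code; w_0 denotes the running witness w, L_j(X) the Lagrange basis over H, Z(X) its vanishing polynomial):

```latex
G(X) \;=\; \sum_{i=0}^{n-1} \mathrm{pow}_i(\beta^*)\cdot
f\Big(\sum_{j=0}^{k} L_j(X)\cdot w_j\Big)_i,
\qquad
K(X) \;=\; \frac{G(X) - F(\alpha)\cdot L_0(X)}{Z(X)}
```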
@@ -144,15 +190,14 @@ where
},
Witness {
w: w_star,
r_w: w.r_w,
r_w: w.r_w, // wip, fold also r_w (blinding used for the w commitment)
},
)
}

pub fn verifier(
tr: &mut Transcript<C::ScalarField, C>,
pedersen_params: &PedersenParams<C>,
r1cs: R1CS<C::ScalarField>,
transcript: &mut Transcript<C::ScalarField, C>,
r1cs: &R1CS<C::ScalarField>,
// running instance
instance: CommittedInstance<C>,
// incoming instances

@@ -164,17 +209,14 @@ where
let t = instance.betas.len();
let n = r1cs.A[0].len();

let delta = tr.get_challenge();
let delta = transcript.get_challenge();
let deltas = powers_of_beta(delta, t);

tr.add_vec(&F_coeffs);
transcript.add_vec(&F_coeffs);

let alpha = tr.get_challenge();
let alpha = transcript.get_challenge();
let alphas = all_powers(alpha, n);

// dbg!(instance.e);
// dbg!(F_coeffs[0]);

// F(alpha) = e + \sum_t F_i * alpha^i
let mut F_alpha = instance.e;
for (i, F_i) in F_coeffs.iter().enumerate() {

@@ -193,20 +235,21 @@ where
.map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha)
.collect();

let gamma = tr.get_challenge();
let gamma = transcript.get_challenge();

let k = vec_instances.len();
let domain_k = GeneralEvaluationDomain::<C::ScalarField>::new(k).unwrap();
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(domain_k);
let Z_X: DensePolynomial<C::ScalarField> = domain_k.vanishing_polynomial().into();
let H = GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).unwrap();
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
let K_X: DensePolynomial<C::ScalarField> =
DensePolynomial::<C::ScalarField>::from_coefficients_vec(K_coeffs);

let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);

let mut phi_star: C = instance.phi.0 * L_X[0].evaluate(&gamma);
for i in 0..k {
phi_star += vec_instances[i].phi.0 * L_X[i].evaluate(&gamma);
phi_star += vec_instances[i].phi.0 * L_X[i + 1].evaluate(&gamma);
}

// return the folded instance
@@ -226,12 +269,12 @@ fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
let b = bit_decompose(i as u64, n as usize);

let mut r: F = F::one();
for (j, beta_i) in betas.iter().enumerate() {
for (j, beta_j) in betas.iter().enumerate() {
let mut b_j = F::zero();
if b[j] {
b_j = F::one();
}
r *= (F::one() - b_j) + b_j * betas[j];
r *= (F::one() - b_j) + b_j * beta_j;
}
r
}
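For reference, the loop in pow_i implements the bit-decomposition product (a sketch, with b the binary decomposition of i and t the length of betas):

```latex
\mathrm{pow}_i(\beta) \;=\; \prod_{j=0}^{t-1}\big((1-b_j) + b_j\cdot\beta_j\big)
\;=\; \prod_{j\,:\,b_j=1}\beta_j
```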
@@ -244,20 +287,19 @@ fn pow_i_over_x<F: PrimeField>(i: usize, betas: &Vec<F>, deltas: &Vec<F>) -> Spa

let mut r: SparsePolynomial<F> =
SparsePolynomial::<F>::from_coefficients_vec(vec![(0, F::one())]); // start with r(x) = 1
for (j, beta_i) in betas.iter().enumerate() {
for (j, beta_j) in betas.iter().enumerate() {
if b[j] {
let curr: SparsePolynomial<F> =
SparsePolynomial::<F>::from_coefficients_vec(vec![(0, betas[j]), (1, deltas[j])]);
SparsePolynomial::<F>::from_coefficients_vec(vec![(0, *beta_j), (1, deltas[j])]);
r = r.mul(&curr);
}
}
r
}

// method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();

for i in 0..domain_n.size() {
let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
.map(|k| if k == i { F::one() } else { F::zero() })

@@ -275,11 +317,10 @@ pub struct R1CS<F: PrimeField> {
}

// f(w) in R1CS context
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &Vec<F>) -> Vec<F> {
let AzBz = hadamard(&mat_vec_mul(&r1cs.A, &w), &mat_vec_mul(&r1cs.B, &w));
let Cz = mat_vec_mul(&r1cs.C, &w);
let f_w = vec_sub(&AzBz, &Cz);
f_w
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Vec<F> {
let AzBz = hadamard(&mat_vec_mul(&r1cs.A, w), &mat_vec_mul(&r1cs.B, w));
let Cz = mat_vec_mul(&r1cs.C, w);
vec_sub(&AzBz, &Cz)
}

fn check_instance<C: CurveGroup>(

@@ -288,11 +329,8 @@ fn check_instance<C: CurveGroup>(
w: Witness<C>,
) -> bool {
let n = 2_u64.pow(instance.betas.len() as u32) as usize;
dbg!(n);
dbg!(w.w.len());

let f_w = eval_f(&r1cs, &w.w); // f(w)
dbg!(f_w.len());
let f_w = eval_f(r1cs, &w.w); // f(w)

let mut r = C::ScalarField::zero();
for i in 0..n {

@@ -310,7 +348,6 @@ mod tests {
use crate::pedersen::Pedersen;
use crate::transcript::poseidon_test_config;
use ark_bls12_381::{Fr, G1Projective};
use ark_std::One;
use ark_std::UniformRand;

pub fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {

@@ -405,16 +442,22 @@ mod tests {
let mut rng = ark_std::test_rng();
let t = 3;
let n = 8;
// let beta = Fr::rand(&mut rng);
// let delta = Fr::rand(&mut rng);
let beta = Fr::from(3);
let delta = Fr::from(5);
let beta = Fr::rand(&mut rng);
let delta = Fr::rand(&mut rng);
let betas = powers_of_beta(beta, t);
let deltas = powers_of_beta(delta, t);

// for i in 0..n {
// dbg!(pow_i_over_x(i, &betas, &deltas));
// }
// compute b + X*d, with X=rand
let x = Fr::rand(&mut rng);
let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));

// assert that computing pow_over_x of betas,deltas, is equivalent to first computing the
// vector [betas+X*deltas] and then computing pow_i over it
for i in 0..n {
let pow_i1 = pow_i_over_x(i, &betas, &deltas);
let pow_i2 = pow_i(i, &bxd);
assert_eq!(pow_i1.evaluate(&x), pow_i2);
}
}
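The assertion in the test above checks, for a random x, the identity (a sketch, with b the binary decomposition of i):

```latex
\mathrm{pow\_i\_over\_x}(i,\beta,\delta)\big|_{X=x}
\;=\; \mathrm{pow}_i(\beta + x\cdot\delta)
\;=\; \prod_{j\,:\,b_j=1}\big(\beta_j + x\cdot\delta_j\big)
```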
#[test]

@@ -431,15 +474,15 @@ mod tests {
}

#[test]
fn test_fold() {
fn test_fold_native_case() {
let mut rng = ark_std::test_rng();
let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 100); // 100 is wip, will get it from actual vec
let poseidon_config = poseidon_test_config::<Fr>();

let k = 5;
let k = 6;

let r1cs = get_test_r1cs::<Fr>();
let mut z = get_test_z::<Fr>(3);
let z = get_test_z::<Fr>(3);
let mut zs: Vec<Vec<Fr>> = Vec::new();
for i in 0..k {
let z_i = get_test_z::<Fr>(i + 4);

@@ -452,8 +495,6 @@ mod tests {

let n = z.len();
let t = log2(n) as usize;
dbg!(n);
dbg!(t);

let beta = Fr::rand(&mut rng);
let betas = powers_of_beta(beta, t);

@@ -489,30 +530,30 @@ mod tests {

let (F_coeffs, K_coeffs, folded_instance, folded_witness) = Folding::<G1Projective>::prover(
&mut transcript_p,
&pedersen_params,
&r1cs,
instance.clone(),
witness,
instances.clone(),
witnesses,
);
dbg!(&F_coeffs);

// verifier
let folded_instance_v = Folding::<G1Projective>::verifier(
&mut transcript_v,
&pedersen_params,
r1cs.clone(), // TODO rm clone do borrow
&r1cs,
instance,
instances,
F_coeffs,
K_coeffs,
);

// check that prover & verifier folded instances are the same values
assert_eq!(folded_instance.phi.0, folded_instance_v.phi.0);
assert_eq!(folded_instance.betas, folded_instance_v.betas);
assert_eq!(folded_instance.e, folded_instance_v.e);
assert!(!folded_instance.e.is_zero());

// assert!(check_instance(&r1cs, folded_instance, folded_witness));
// check that the folded instance satisfies the relation
assert!(check_instance(&r1cs, folded_instance, folded_witness));
}
}
@@ -1,4 +1,4 @@
/// transcript.rs file and adapted from https://github.com/arnaucube/nova-study
/// transcript.rs file adapted from https://github.com/arnaucube/nova-study
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::PrimeField;
use std::marker::PhantomData;

@@ -1,7 +1,7 @@
use ark_ff::fields::PrimeField;
use ark_std::cfg_iter;

pub fn vec_add<F: PrimeField>(a: &Vec<F>, b: &[F]) -> Vec<F> {
pub fn vec_add<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
let mut r: Vec<F> = vec![F::zero(); a.len()];
for i in 0..a.len() {
r[i] = a[i] + b[i];

@@ -9,7 +9,7 @@ pub fn vec_add<F: PrimeField>(a: &Vec<F>, b: &[F]) -> Vec<F> {
r
}

pub fn vec_sub<F: PrimeField>(a: &Vec<F>, b: &Vec<F>) -> Vec<F> {
pub fn vec_sub<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
let mut r: Vec<F> = vec![F::zero(); a.len()];
for i in 0..a.len() {
r[i] = a[i] - b[i];

@@ -45,7 +45,7 @@ pub fn mat_vec_mul<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Vec<F> {
r
}

pub fn hadamard<F: PrimeField>(a: &Vec<F>, b: &Vec<F>) -> Vec<F> {
pub fn hadamard<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect()
}