Add Transcript & TranscriptVar constraint-system (CS) test, clean up dependencies & clippy warnings. Add GHA (GitHub Actions) workflow for tests

2023-04-29 17:36:51 +02:00
parent 7cf729f00a
commit f9b3a66ddb
9 changed files with 172 additions and 173 deletions

View File

@@ -1,31 +1,25 @@
use ark_ec::AffineRepr;
use ark_ec::{CurveGroup, Group};
use ark_ff::{fields::Fp256, BigInteger, Field, PrimeField};
use ark_ec::CurveGroup;
use ark_ff::{Field, PrimeField};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
bits::uint8::UInt8,
boolean::Boolean,
eq::EqGadget,
fields::{
fp::{AllocatedFp, FpVar},
nonnative::NonNativeFieldVar,
FieldVar,
},
groups::curves::short_weierstrass::ProjectiveVar,
fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToBitsGadget, ToBytesGadget, ToConstraintFieldGadget,
ToBitsGadget,
ToConstraintFieldGadget,
// groups::curves::short_weierstrass::ProjectiveVar,
};
// use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar;
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::ops::{Add, Mul, Sub};
use ark_crypto_primitives::crh::poseidon::{
constraints::{CRHGadget, CRHParametersVar},
CRH,
};
use ark_crypto_primitives::crh::{CRHScheme, CRHSchemeGadget};
use ark_crypto_primitives::snark::{FromFieldElementsGadget, SNARKGadget, SNARK};
// use ark_crypto_primitives::crh::poseidon::{
// constraints::{CRHGadget, CRHParametersVar},
// CRH,
// };
// use ark_crypto_primitives::crh::{CRHScheme, CRHSchemeGadget};
// use ark_crypto_primitives::snark::{FromFieldElementsGadget, SNARKGadget, SNARK};
use ark_crypto_primitives::sponge::constraints::CryptographicSpongeVar;
use ark_crypto_primitives::sponge::poseidon::{
constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge,
@@ -74,7 +68,7 @@ where
let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW.0), mode)?;
let x = NonNativeFieldVar::<C::ScalarField, ConstraintF<C>>::new_variable(
cs.clone(),
cs,
|| Ok(val.borrow().x),
mode,
)?;
@@ -108,20 +102,21 @@ where
phi1: PhiVar<C, GC>,
phi2: PhiVar<C, GC>,
phi3: PhiVar<C, GC>,
) -> Result<Boolean<ConstraintF<C>>, SynthesisError> {
) -> Result<(), SynthesisError> {
let r2 = r.square()?;
phi3.cmE.is_eq(
phi3.cmE.enforce_equal(
&(phi1.cmE
+ cmT.scalar_mul_le(r.to_bits_le()?.iter())?
+ phi2.cmE.scalar_mul_le(r2.to_bits_le()?.iter())?),
)?;
phi3.u.is_eq(&(phi1.u + r.clone() * phi2.u))?;
phi3.u.enforce_equal(&(phi1.u + r.clone() * phi2.u))?;
phi3.cmW
.is_eq(&(phi1.cmW + phi2.cmW.scalar_mul_le(r.to_bits_le()?.iter())?))?;
.enforce_equal(&(phi1.cmW + phi2.cmW.scalar_mul_le(r.to_bits_le()?.iter())?))?;
// wip x's check
phi3.x.is_eq(&(phi1.x + r.clone() * phi2.x))
phi3.x.enforce_equal(&(phi1.x + r * phi2.x))?;
Ok(())
}
}
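
A note on the change above: is_eq only computes a Boolean witness, while enforce_equal adds the equality constraint itself, so verify now actually enforces the fold instead of returning a flag (hence the return type change to Result<(), SynthesisError>). Restating the enforced relations, with subscripts 1, 2, 3 for phi1, phi2, phi3:

    cmE_3 = cmE_1 + r * cmT + r^2 * cmE_2
    u_3   = u_1 + r * u_2
    cmW_3 = cmW_1 + r * cmW_2
    x_3   = x_1 + r * x_2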
@@ -173,8 +168,7 @@ where
})?; // r will come from transcript
// 1. phi.x == H(vk_nifs, i, z_0, z_i, phiBig)
let mut sponge =
PoseidonSpongeVar::<ConstraintF<C>>::new(cs.clone(), &self.poseidon_config);
let mut sponge = PoseidonSpongeVar::<ConstraintF<C>>::new(cs, &self.poseidon_config);
let input = vec![i, z_0, z_i];
sponge.absorb(&input)?;
let input = vec![
@@ -186,12 +180,12 @@ where
sponge.absorb(&input)?;
let h = sponge.squeeze_field_elements(1).unwrap();
let x_CF = phi.x.to_constraint_field()?; // phi.x on the ConstraintF<C>
x_CF[0].is_eq(&h[0])?; // review
x_CF[0].enforce_equal(&h[0])?; // review
// // 2. phi.cmE==0, phi.u==1
// <GC as CurveVar<C, ConstraintF<C>>>::is_zero(&phi.cmE)?;
phi.cmE.is_zero()?;
phi.u.is_one()?;
(phi.cmE.is_zero()?).enforce_equal(&Boolean::TRUE)?;
(phi.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 3. nifs.verify
NIFSGadget::<C, GC>::verify(r, cmT, phi, phiBig, phiOut)?;
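
Same idea here: is_zero and is_one only return Booleans, so they must be constrained against Boolean::TRUE to have any effect. With that, the three checks this function performs (as numbered in the comments) are:

    1. phi.x equals the Poseidon hash of (i, z_0, z_i, phiBig), compared over ConstraintF via to_constraint_field
    2. phi.cmE == 0 and phi.u == 1
    3. the NIFS fold: NIFSGadget::verify(r, cmT, phi, phiBig, phiOut)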
@@ -232,28 +226,24 @@ mod test {
use crate::transcript::Transcript;
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{
rand::{Rng, RngCore},
UniformRand,
};
use ark_std::UniformRand;
use crate::nifs;
use crate::pedersen;
use crate::transcript::poseidon_test_config;
use ark_ec::CurveGroup;
use ark_ec::Group;
// use ark_ed_on_mnt4_298::{constraints::EdwardsVar, EdwardsProjective};
use crate::pedersen::Commitment;
use ark_mnt4_298::{constraints::G1Var as MNT4G1Var, Fq, Fr, G1Projective as MNT4G1Projective};
// use ark_mnt4_298::{constraints::G1Var as MNT4G1Var, G1Projective as MNT4G1Projective}
use ark_mnt4_298::{Fq, Fr};
use ark_mnt6_298::{constraints::G1Var as MNT6G1Var, G1Projective as MNT6G1Projective};
use ark_std::{One, Zero};
use ark_std::One;
// mnt4's Fr is the Constraint Field,
// while mnt4's Fq is the Field where we work, which is the C::ScalarField for C==MNT6G1
#[test]
fn test_phi_var() {
let mut rng = ark_std::test_rng();
let phi = Phi::<MNT6G1Projective> {
cmE: Commitment(MNT6G1Projective::generator()),
u: Fq::one(),
@@ -262,7 +252,7 @@ mod test {
};
let cs = ConstraintSystem::<Fr>::new_ref();
let phiVar =
let _phiVar =
PhiVar::<MNT6G1Projective, MNT6G1Var>::new_witness(cs.clone(), || Ok(phi)).unwrap();
// println!("num_constraints={:?}", cs.num_constraints());
}
@@ -275,17 +265,17 @@ mod test {
let cs = ConstraintSystem::<Fr>::new_ref();
let (r1cs, w1, w2, _, x1, x2, _) = nifs::gen_test_values::<_, Fq>(&mut rng);
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
let (r1cs, ws, _) = nifs::gen_test_values::<Fq>(2);
let (A, _, _) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
let r = Fq::rand(&mut rng); // this would come from the transcript
let fw1 = nifs::FWit::<MNT6G1Projective>::new(w1.clone(), A.len());
let fw2 = nifs::FWit::<MNT6G1Projective>::new(w2.clone(), A.len());
let fw1 = nifs::FWit::<MNT6G1Projective>::new(ws[0].clone(), A.len());
let fw2 = nifs::FWit::<MNT6G1Projective>::new(ws[1].clone(), A.len());
let mut transcript_p = Transcript::<Fq, MNT6G1Projective>::new(&poseidon_config);
let (fw3, phi1, phi2, T, cmT) = nifs::NIFS::<MNT6G1Projective>::P(
let (_fw3, phi1, phi2, _T, cmT) = nifs::NIFS::<MNT6G1Projective>::P(
&mut transcript_p,
&pedersen_params,
r,
@@ -305,9 +295,8 @@ mod test {
let cmTVar = MNT6G1Var::new_witness(cs.clone(), || Ok(cmT.0)).unwrap();
let rVar = NonNativeFieldVar::<Fq, Fr>::new_witness(cs.clone(), || Ok(r)).unwrap();
let valid = NIFSGadget::<MNT6G1Projective, MNT6G1Var>::verify(
rVar, cmTVar, phi1Var, phi2Var, phi3Var,
);
NIFSGadget::<MNT6G1Projective, MNT6G1Var>::verify(rVar, cmTVar, phi1Var, phi2Var, phi3Var)
.unwrap();
// println!("num_constraints={:?}", cs.num_constraints());
}
}

View File

@@ -1,7 +1,9 @@
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(unused)] // TMP
#![allow(clippy::upper_case_acronyms)]
// #![allow(unused)] // TMP
#![allow(dead_code)] // TMP
mod circuits;
mod nifs;

View File

@@ -1,10 +1,6 @@
// use ark_ec::AffineRepr;
use ark_ec::{CurveGroup, Group};
use ark_ff::fields::PrimeField;
use ark_std::{
rand::{Rng, RngCore},
UniformRand,
};
use ark_std::{One, Zero};
use std::marker::PhantomData;
@@ -50,13 +46,13 @@ where
}
}
pub fn commit(&self, params: &PedersenParams<C>) -> Phi<C> {
let cmE = Pedersen::commit(&params, &self.E, &self.rE);
let cmW = Pedersen::commit(&params, &self.W, &self.rW);
let cmE = Pedersen::commit(params, &self.E, &self.rE);
let cmW = Pedersen::commit(params, &self.W, &self.rW);
Phi {
cmE,
u: C::ScalarField::one(),
cmW,
x: self.W[0].clone(), // TODO WIP review
x: self.W[0], // TODO WIP review
}
}
}
@@ -75,18 +71,18 @@ where
r1cs: &R1CS<C::ScalarField>,
u1: C::ScalarField,
u2: C::ScalarField,
z1: &Vec<C::ScalarField>,
z2: &Vec<C::ScalarField>,
z1: &[C::ScalarField],
z2: &[C::ScalarField],
) -> Vec<C::ScalarField> {
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
// this is parallelizable (for the future)
let Az1 = matrix_vector_product(&A, &z1);
let Bz1 = matrix_vector_product(&B, &z1);
let Cz1 = matrix_vector_product(&C, &z1);
let Az2 = matrix_vector_product(&A, &z2);
let Bz2 = matrix_vector_product(&B, &z2);
let Cz2 = matrix_vector_product(&C, &z2);
let Az1 = matrix_vector_product(&A, z1);
let Bz1 = matrix_vector_product(&B, z1);
let Cz1 = matrix_vector_product(&C, z1);
let Az2 = matrix_vector_product(&A, z2);
let Bz2 = matrix_vector_product(&B, z2);
let Cz2 = matrix_vector_product(&C, z2);
let Az1_Bz2 = hadamard_product(Az1, Bz2);
let Az2_Bz1 = hadamard_product(Az2, Bz1);
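
For context on comp_T (its tail falls outside this hunk): combining the products above, the cross term has the standard Nova form

    T = Az_1 ∘ Bz_2 + Az_2 ∘ Bz_1 - u_1 * Cz_2 - u_2 * Cz_1

where ∘ is the Hadamard product; T is then folded into the error vector below as E_3 = E_1 + r * T + r^2 * E_2.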
@@ -109,18 +105,13 @@ where
let E: Vec<C::ScalarField> = vec_add(
// this syntax will be simplified with a future operators impl (or at least a method
// for r-lin)
&vec_add(&fw1.E, &vector_elem_product(&T, &r)),
&vec_add(&fw1.E, &vector_elem_product(T, &r)),
&vector_elem_product(&fw2.E, &r2),
);
let rE = fw1.rE + r * rT + r2 * fw2.rE;
let W = vec_add(&fw1.W, &vector_elem_product(&fw2.W, &r));
let rW = fw1.rW + r * fw2.rW;
FWit::<C> {
E: E.into(),
rE,
W: W.into(),
rW,
}
FWit::<C> { E, rE, W, rW }
}
pub fn fold_instance(
@@ -146,6 +137,7 @@ where
}
// NIFS.P
#[allow(clippy::type_complexity)]
pub fn P(
tr: &mut Transcript<C::ScalarField, C>,
pedersen_params: &PedersenParams<C>,
@@ -155,25 +147,25 @@ where
fw2: FWit<C>,
) -> (FWit<C>, Phi<C>, Phi<C>, Vec<C::ScalarField>, Commitment<C>) {
// compute committed instances
let phi1 = fw1.commit(&pedersen_params); // wip
let phi2 = fw2.commit(&pedersen_params);
let phi1 = fw1.commit(pedersen_params); // wip
let phi2 = fw2.commit(pedersen_params);
// compute cross terms
let T = Self::comp_T(&r1cs, phi1.u, phi2.u, &fw1.W, &fw2.W);
let T = Self::comp_T(r1cs, phi1.u, phi2.u, &fw1.W, &fw2.W);
let rT = tr.get_challenge(); // r_T
let cmT = Pedersen::commit(&pedersen_params, &T, &rT);
let cmT = Pedersen::commit(pedersen_params, &T, &rT);
// fold witness
let fw3 = NIFS::<C>::fold_witness(r, &fw1, &fw2, &T, rT);
// fold committed instances
// let phi3 = NIFS::<C>::fold_instance(r, &phi1, &phi2, &cmT);
return (fw3, phi1, phi2, T, cmT); // maybe return phi3
(fw3, phi1, phi2, T, cmT) // maybe return phi3
}
// NIFS.V
pub fn V(r: C::ScalarField, phi1: &Phi<C>, phi2: &Phi<C>, cmT: &Commitment<C>) -> Phi<C> {
NIFS::<C>::fold_instance(r, &phi1, &phi2, &cmT)
NIFS::<C>::fold_instance(r, phi1, phi2, cmT)
}
// verify committed folded instance (phi) relations
@@ -210,9 +202,9 @@ where
rT: C::ScalarField,
cmT: &Commitment<C>,
) -> (PedersenProof<C>, PedersenProof<C>, PedersenProof<C>) {
let cmE_proof = Pedersen::prove(&pedersen_params, tr, &phi.cmE, &fw.E, &fw.rE);
let cmW_proof = Pedersen::prove(&pedersen_params, tr, &phi.cmW, &fw.W, &fw.rW);
let cmT_proof = Pedersen::prove(&pedersen_params, tr, &cmT, &T, &rT);
let cmE_proof = Pedersen::prove(pedersen_params, tr, &phi.cmE, &fw.E, &fw.rE);
let cmW_proof = Pedersen::prove(pedersen_params, tr, &phi.cmW, &fw.W, &fw.rW);
let cmT_proof = Pedersen::prove(pedersen_params, tr, cmT, &T, &rT);
(cmE_proof, cmW_proof, cmT_proof)
}
pub fn verify_commitments(
@@ -224,13 +216,13 @@ where
cmW_proof: PedersenProof<C>,
cmT_proof: PedersenProof<C>,
) -> bool {
if !Pedersen::verify(&pedersen_params, tr, phi.cmE, cmE_proof) {
if !Pedersen::verify(pedersen_params, tr, phi.cmE, cmE_proof) {
return false;
}
if !Pedersen::verify(&pedersen_params, tr, phi.cmW, cmW_proof) {
if !Pedersen::verify(pedersen_params, tr, phi.cmW, cmW_proof) {
return false;
}
if !Pedersen::verify(&pedersen_params, tr, cmT, cmT_proof) {
if !Pedersen::verify(pedersen_params, tr, cmT, cmT_proof) {
return false;
}
true
@@ -238,9 +230,7 @@ where
}
// only for tests across different files
pub fn gen_test_values<R: Rng, F: PrimeField>(
rng: &mut R,
) -> (R1CS<F>, Vec<F>, Vec<F>, Vec<F>, Vec<F>, Vec<F>, Vec<F>) {
pub fn gen_test_values<F: PrimeField>(n: usize) -> (R1CS<F>, Vec<Vec<F>>, Vec<Vec<F>>) {
// R1CS for: x^3 + x + 5 = y (example from article
// https://www.vitalik.ca/general/2016/12/10/qap.html )
let A = to_F_matrix::<F>(vec![
@@ -261,21 +251,27 @@ pub fn gen_test_values<R: Rng, F: PrimeField>(
vec![0, 0, 0, 0, 0, 1],
vec![0, 0, 1, 0, 0, 0],
]);
// TODO in the future update this method to generate witness, and generate n witnesses
// instances, x: pub
let w1 = to_F_vec::<F>(vec![1, 3, 35, 9, 27, 30]);
let x1 = to_F_vec::<F>(vec![35]);
let w2 = to_F_vec::<F>(vec![1, 4, 73, 16, 64, 68]);
let x2 = to_F_vec::<F>(vec![73]);
let w3 = to_F_vec::<F>(vec![1, 5, 135, 25, 125, 130]);
let x3 = to_F_vec::<F>(vec![135]);
let r1cs = R1CS::<F> {
A: A.clone(),
B: B.clone(),
C: C.clone(),
};
(r1cs, w1, w2, w3, x1, x2, x3)
// generate n witnesses
let mut w: Vec<Vec<F>> = Vec::new();
let mut x: Vec<Vec<F>> = Vec::new();
for i in 0..n {
let input = 3 + i;
let w_i = to_F_vec::<F>(vec![
1,
input,
input * input * input + input + 5, // x^3 + x + 5
input * input, // x^2
input * input * input, // x^2 * x
input * input * input + input, // x^3 + x
]);
w.push(w_i.clone());
let x_i = to_F_vec::<F>(vec![input * input * input + input + 5]);
x.push(x_i.clone());
}
let r1cs = R1CS::<F> { A, B, C };
(r1cs, w, x)
}
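
A quick sanity check of the new generator (a sketch; assumes it runs where Fr and to_F_vec are in scope, e.g. in the tests below): for i = 0 the input is 3 + 0 = 3, which reproduces the previously hardcoded first witness, since 3^3 + 3 + 5 = 35, 3^2 = 9, 3^3 = 27 and 3^3 + 3 = 30.

    let (_r1cs, ws, xs) = gen_test_values::<Fr>(2);
    assert_eq!(ws[0], to_F_vec::<Fr>(vec![1, 3, 35, 9, 27, 30]));
    assert_eq!(xs[0], to_F_vec::<Fr>(vec![35]));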
#[cfg(test)]
@@ -283,14 +279,9 @@ mod tests {
use super::*;
use crate::pedersen::Pedersen;
use crate::transcript::poseidon_test_config;
use ark_ec::CurveGroup;
use ark_mnt4_298::{Fr, G1Projective};
use ark_std::{
rand::{Rng, RngCore},
UniformRand,
};
use ark_std::{One, Zero};
use std::ops::Mul;
use ark_std::One;
use ark_std::UniformRand;
// fold 2 instances into one
#[test]
@@ -299,19 +290,19 @@ mod tests {
let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 100); // 100 is wip, will get it from actual vec
let poseidon_config = poseidon_test_config::<Fr>();
let (r1cs, w1, w2, _, x1, x2, _) = gen_test_values(&mut rng);
let (r1cs, ws, _) = gen_test_values(2);
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
let r = Fr::rand(&mut rng); // this would come from the transcript
let fw1 = FWit::<G1Projective>::new(w1.clone(), A.len());
let fw2 = FWit::<G1Projective>::new(w2.clone(), A.len());
let fw1 = FWit::<G1Projective>::new(ws[0].clone(), A.len());
let fw2 = FWit::<G1Projective>::new(ws[1].clone(), A.len());
// get committed instances
let phi1 = fw1.commit(&pedersen_params); // wip
let phi2 = fw2.commit(&pedersen_params);
let T = NIFS::<G1Projective>::comp_T(&r1cs, phi1.u, phi2.u, &w1, &w2);
let T = NIFS::<G1Projective>::comp_T(&r1cs, phi1.u, phi2.u, &ws[0], &ws[1]);
let rT: Fr = Fr::rand(&mut rng);
let cmT = Pedersen::commit(&pedersen_params, &T, &rT);
@@ -363,6 +354,7 @@ mod tests {
cmW_proof,
cmT_proof,
);
assert!(v);
}
// fold i_1, i_2 instances into i_12, and then i_12, i_3 into i_123
@@ -372,19 +364,19 @@ mod tests {
let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 6);
let poseidon_config = poseidon_test_config::<Fr>();
let (r1cs, w1, w2, w3, x1, x2, x3) = gen_test_values(&mut rng);
let (r1cs, ws, _) = gen_test_values(3);
let u1: Fr = Fr::one();
let u2: Fr = Fr::one();
let T_12 = NIFS::<G1Projective>::comp_T(&r1cs, u1, u2, &w1, &w2);
let T_12 = NIFS::<G1Projective>::comp_T(&r1cs, u1, u2, &ws[0], &ws[1]);
let rT_12: Fr = Fr::rand(&mut rng);
let cmT_12 = Pedersen::commit(&pedersen_params, &T_12, &rT_12);
let r = Fr::rand(&mut rng); // this would come from the transcript
let fw1 = FWit::<G1Projective>::new(w1, T_12.len());
let fw2 = FWit::<G1Projective>::new(w2, T_12.len());
let fw1 = FWit::<G1Projective>::new(ws[0].clone(), T_12.len());
let fw2 = FWit::<G1Projective>::new(ws[1].clone(), T_12.len());
// fold witness
let fw_12 = NIFS::<G1Projective>::fold_witness(r, &fw1, &fw2, &T_12, rT_12);
@@ -403,7 +395,7 @@ mod tests {
//----
// 2nd fold
let fw3 = FWit::<G1Projective>::new(w3, r1cs.A.len());
let fw3 = FWit::<G1Projective>::new(ws[2].clone(), r1cs.A.len());
// compute cross terms
let T_123 = NIFS::<G1Projective>::comp_T(&r1cs, phi_12.u, Fr::one(), &fw_12.W, &fw3.W);
@@ -477,23 +469,23 @@ mod tests {
let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 100); // 100 is wip, will get it from actual vec
let poseidon_config = poseidon_test_config::<Fr>();
let (r1cs, w1, w2, _, x1, x2, _) = gen_test_values(&mut rng);
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
let (r1cs, ws, _) = gen_test_values(3);
let (A, _, _) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
let r = Fr::rand(&mut rng); // this would come from the transcript
let fw1 = FWit::<G1Projective>::new(w1.clone(), A.len());
let fw2 = FWit::<G1Projective>::new(w2.clone(), A.len());
let fw1 = FWit::<G1Projective>::new(ws[0].clone(), A.len());
let fw2 = FWit::<G1Projective>::new(ws[1].clone(), A.len());
// init Prover's transcript
let mut transcript_p = Transcript::<Fr, G1Projective>::new(&poseidon_config);
// NIFS.P
let (fw3, phi1, phi2, T, cmT) =
let (_fw3, phi1, phi2, _T, cmT) =
NIFS::<G1Projective>::P(&mut transcript_p, &pedersen_params, r, &r1cs, fw1, fw2);
// init Verifier's transcript
let mut transcript_v = Transcript::<Fr, G1Projective>::new(&poseidon_config);
// let mut transcript_v = Transcript::<Fr, G1Projective>::new(&poseidon_config);
// NIFS.V
let phi3 = NIFS::<G1Projective>::V(r, &phi1, &phi2, &cmT);

View File

@@ -1,4 +1,3 @@
use ark_ec::AffineRepr;
use ark_ec::{CurveGroup, Group};
use ark_std::{
rand::{Rng, RngCore},
@@ -10,7 +9,6 @@ use crate::utils::{naive_msm, vec_add, vector_elem_product};
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::PrimeField;
pub struct Proof_elem<C: CurveGroup> {
R: C,
@@ -60,7 +58,7 @@ where
v: &C::ScalarField,
) -> CommitmentElem<C> {
let r = C::ScalarField::rand(rng);
let cm: C = (params.g.mul(v) + params.h.mul(r));
let cm: C = params.g.mul(v) + params.h.mul(r);
CommitmentElem::<C> { cm, r }
}
pub fn commit(
@@ -82,7 +80,7 @@ where
let r1 = transcript.get_challenge();
let r2 = transcript.get_challenge();
let R: C = (params.g.mul(r1) + params.h.mul(r2));
let R: C = params.g.mul(r1) + params.h.mul(r2);
transcript.add_point(&cm);
transcript.add_point(&R);
@@ -109,7 +107,7 @@ where
transcript.add_point(&R);
let e = transcript.get_challenge();
let u_ = vec_add(&vector_elem_product(&v, &e), &d);
let u_ = vec_add(&vector_elem_product(v, &e), &d);
let ru_ = e * r + r1;
Proof::<C> { R, u_, ru_ }
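
For orientation, the Pedersen code touched here follows the usual commit-and-open sigma protocol. From the lines above: a single-element commitment is cm = g * v + h * r; to open, the prover forms a masking commitment R (e.g. R = g * r1 + h * r2), adds cm and R to the transcript, squeezes the challenge e, and responds with

    u_  = d + e * v     (element-wise in the vector case)
    ru_ = r1 + e * r

The corresponding verifier check sits outside this hunk; the expected shape is the standard R + e * cm == Com(u_; ru_).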
@@ -183,9 +181,7 @@ where
mod tests {
use super::*;
use crate::transcript::poseidon_test_config;
use ark_ec::CurveGroup;
use ark_mnt4_298::{Fr, G1Projective};
use std::ops::Mul;
#[test]
fn test_pedersen_single_element() {

View File

@@ -1,19 +1,14 @@
// this file contains an initial implementation of the sum-check protocol; it is not used by the rest of the repo,
// but was implemented as an exercise and will probably be used in the future.
use ark_ec::{CurveGroup, Group};
use ark_ec::CurveGroup;
use ark_ff::{BigInteger, PrimeField};
use ark_poly::{
multivariate::{SparsePolynomial, SparseTerm, Term},
univariate::DensePolynomial,
DenseMVPolynomial, DenseUVPolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
SparseMultilinearExtension,
DenseMVPolynomial, DenseUVPolynomial, Polynomial,
};
use ark_std::cfg_into_iter;
use ark_std::log2;
use ark_std::marker::PhantomData;
use ark_std::ops::Mul;
use ark_std::{rand::Rng, UniformRand};
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
@@ -61,7 +56,7 @@ where
new_term = vec![];
break;
} else {
new_coef = new_coef * v.pow([(*power) as u64]);
new_coef *= v.pow([(*power) as u64]);
}
}
_ => {
@@ -119,7 +114,7 @@ where
}
for i in 0..n_vars {
let k = F::from(iter_num as u64).into_bigint().to_bytes_le();
let bit = k[(i / 8) as usize] & (1 << (i % 8));
let bit = k[i / 8] & (1 << (i % 8));
if bit == 0 {
p[none_pos + i] = Some(F::zero());
} else {
@@ -143,7 +138,7 @@ where
for i in 0..(2_u64.pow(v as u32) as usize) {
let p = Self::point_complete(vec![], v, i);
H = H + g.evaluate(&p.into());
H += g.evaluate(&p.into());
}
transcript.add(&H);
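
The loop above computes the prover's first sum-check message: the claimed sum of g over the Boolean hypercube,

    H = sum_{x in {0,1}^v} g(x)

which is then absorbed into the transcript.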
@@ -210,6 +205,12 @@ mod tests {
use super::*;
use crate::transcript::poseidon_test_config;
use ark_mnt4_298::{Fr, G1Projective}; // scalar field
use ark_poly::{
multivariate::{SparsePolynomial, SparseTerm, Term},
univariate::DensePolynomial,
DenseMVPolynomial, DenseUVPolynomial,
};
use ark_std::{rand::Rng, UniformRand};
#[test]
fn test_new_point() {
@@ -286,7 +287,6 @@ mod tests {
#[test]
fn test_flow_hardcoded_values() {
let mut rng = ark_std::test_rng();
// g(X_0, X_1, X_2) = 2 X_0^3 + X_0 X_2 + X_1 X_2
let terms = vec![
(Fr::from(2u32), SparseTerm::new(vec![(0_usize, 3)])),

View File

@@ -1,6 +1,5 @@
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
use std::marker::PhantomData;
use ark_r1cs_std::fields::fp::FpVar;
@@ -13,8 +12,6 @@ use ark_crypto_primitives::sponge::{
};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_poly::DenseUVPolynomial;
pub struct Transcript<F: PrimeField + Absorb, C: CurveGroup>
where
<C as CurveGroup>::BaseField: Absorb,
@@ -28,7 +25,7 @@ where
<C as CurveGroup>::BaseField: Absorb,
{
pub fn new(poseidon_config: &PoseidonConfig<F>) -> Self {
let mut sponge = PoseidonSponge::<F>::new(&poseidon_config);
let sponge = PoseidonSponge::<F>::new(poseidon_config);
Transcript {
sponge,
_c: PhantomData,
@@ -62,8 +59,8 @@ pub struct TranscriptVar<F: PrimeField> {
sponge: PoseidonSpongeVar<F>,
}
impl<F: PrimeField> TranscriptVar<F> {
pub fn new(cs: ConstraintSystemRef<F>, poseidon_config: PoseidonConfig<F>) -> Self {
let mut sponge = PoseidonSpongeVar::<F>::new(cs.clone(), &poseidon_config);
pub fn new(cs: ConstraintSystemRef<F>, poseidon_config: &PoseidonConfig<F>) -> Self {
let sponge = PoseidonSpongeVar::<F>::new(cs, poseidon_config);
Self { sponge }
}
pub fn add(&mut self, v: FpVar<F>) -> Result<(), SynthesisError> {
@@ -71,12 +68,12 @@ impl<F: PrimeField> TranscriptVar<F> {
}
pub fn get_challenge(&mut self) -> Result<FpVar<F>, SynthesisError> {
let c = self.sponge.squeeze_field_elements(1)?;
self.sponge.absorb(&c[0]);
self.sponge.absorb(&c[0])?;
Ok(c[0].clone())
}
pub fn get_challenge_vec(&mut self, n: usize) -> Result<Vec<FpVar<F>>, SynthesisError> {
let c = self.sponge.squeeze_field_elements(n)?;
self.sponge.absorb(&c);
self.sponge.absorb(&c)?;
Ok(c)
}
}
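
Two small but real fixes in this hunk: absorb returns a Result, so the old calls silently dropped a possible SynthesisError and the added ? now propagates it; and re-absorbing the squeezed challenge keeps later challenges bound to earlier ones. The new test at the bottom of this file checks that the native Transcript and the in-circuit TranscriptVar produce the same challenge for the same input.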
@@ -109,17 +106,26 @@ pub fn poseidon_test_config<F: PrimeField>() -> PoseidonConfig<F> {
)
}
// #[cfg(test)]
// mod tests {
// use super::*;
// use ark_mnt4_298::{Fr, G1Projective}; // scalar field
// use ark_std::{One, Zero};
//
// #[test]
// fn test_poseidon() {
// let config = poseidon_test_config::<Fr>();
// let mut tr = Transcript::<Fr, G1Projective>::new(&config);
// tr.add(&Fr::one());
// println!("c {:?}", tr.get_challenge().to_string());
// }
// }
#[cfg(test)]
mod tests {
use super::*;
use ark_mnt4_298::{Fr, G1Projective}; // scalar field
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
#[test]
fn test_transcript_and_transcriptvar() {
let config = poseidon_test_config::<Fr>();
let mut tr = Transcript::<Fr, G1Projective>::new(&config);
tr.add(&Fr::from(42_u32));
let c = tr.get_challenge();
let cs = ConstraintSystem::<Fr>::new_ref();
let mut tr_var = TranscriptVar::<Fr>::new(cs.clone(), &config);
let v = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(42_u32))).unwrap();
tr_var.add(v).unwrap();
let c_var = tr_var.get_challenge().unwrap();
assert_eq!(c, c_var.value().unwrap());
}
}

View File

@@ -1,4 +1,3 @@
use ark_ec::AffineRepr;
use ark_ec::CurveGroup;
use ark_ff::fields::PrimeField;
use core::ops::{Add, Sub};
@@ -12,7 +11,9 @@ pub fn vector_elem_product<F: PrimeField>(a: &Vec<F>, e: &F) -> Vec<F> {
}
r
}
pub fn matrix_vector_product<F: PrimeField>(M: &Vec<Vec<F>>, z: &Vec<F>) -> Vec<F> {
#[allow(clippy::needless_range_loop)]
pub fn matrix_vector_product<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Vec<F> {
// TODO assert len
let mut r: Vec<F> = vec![F::zero(); M.len()];
for i in 0..M.len() {
@@ -37,7 +38,7 @@ pub fn hadamard_product<F: PrimeField>(a: Vec<F>, b: Vec<F>) -> Vec<F> {
// vec_add(a, vector_elem_product(&b, &r)) // WIP probably group loops
// }
pub fn naive_msm<C: CurveGroup>(s: &Vec<C::ScalarField>, p: &Vec<C>) -> C {
pub fn naive_msm<C: CurveGroup>(s: &Vec<C::ScalarField>, p: &[C]) -> C {
// TODO check lengths, or at least check s.len()>= p.len()
let mut r = p[0].mul(s[0]);
@@ -47,7 +48,7 @@ pub fn naive_msm<C: CurveGroup>(s: &Vec<C::ScalarField>, p: &Vec<C>) -> C {
r
}
pub fn vec_add<F: PrimeField>(a: &Vec<F>, b: &Vec<F>) -> Vec<F> {
pub fn vec_add<F: PrimeField>(a: &Vec<F>, b: &[F]) -> Vec<F> {
let mut r: Vec<F> = vec![F::zero(); a.len()];
for i in 0..a.len() {
r[i] = a[i] + b[i];
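
For reference, the helpers whose signatures change in these hunks are the naive linear-algebra utilities used by the NIFS code:

    vector_elem_product(a, e)_i   = a_i * e
    matrix_vector_product(M, z)_i = sum_j M_ij * z_j
    naive_msm(s, p)               = sum_i s_i * P_i
    vec_add(a, b)_i               = a_i + b_i

Switching the arguments from &Vec<_> to &[_] is the usual clippy ptr_arg fix; behaviour is unchanged.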
@@ -83,7 +84,7 @@ impl<F: PrimeField> Add<Ve<F>> for Ve<F> {
type Output = Ve<F>;
fn add(self, rhs_vec: Self) -> Ve<F> {
let lhs = self.0.clone();
let rhs = rhs_vec.0.clone();
let rhs = rhs_vec.0;
let mut r: Vec<F> = vec![F::zero(); lhs.len()];
for i in 0..self.0.len() {
r[i] = lhs[i] + rhs[i];
@@ -95,7 +96,7 @@ impl<F: PrimeField> Sub<Ve<F>> for Ve<F> {
type Output = Ve<F>;
fn sub(self, rhs_vec: Self) -> Ve<F> {
let lhs = self.0.clone();
let rhs = rhs_vec.0.clone();
let rhs = rhs_vec.0;
let mut r: Vec<F> = vec![F::zero(); lhs.len()];
for i in 0..self.0.len() {
r[i] = lhs[i] - rhs[i];
@@ -125,10 +126,7 @@ pub fn to_F_vec<F: PrimeField>(z: Vec<usize>) -> Vec<F> {
#[cfg(test)]
mod tests {
use super::*;
use ark_ec::CurveGroup;
use ark_mnt4_298::{g1::G1Affine, Fr};
use ark_std::{One, Zero};
use std::ops::Mul;
use ark_mnt4_298::Fr;
#[test]
fn test_matrix_vector_product() {