From 5a43ecd268785623b90a72518c419bdab49e0ae9 Mon Sep 17 00:00:00 2001
From: arnaucube
Date: Wed, 19 Apr 2023 00:56:59 +0200
Subject: [PATCH] add compute T, start test of simple folding

---
 README.md       |   2 +-
 src/nifs.rs     | 125 ++++++++++++++++++++++++++++++++++++++++++++----
 src/pedersen.rs |  54 +++++++++++++++------
 src/utils.rs    |  47 +++++++++++-------
 4 files changed, 187 insertions(+), 41 deletions(-)

diff --git a/README.md b/README.md
index 05edad6..baf7857 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,6 @@ Implementation of [Nova](https://eprint.iacr.org/2021/370.pdf) using [arkworks-r
 
 > Warning: Implementation from scratch to learn the internals of Nova. Do not use in production.
 
-This repo is an ongoing implementation, not to be used but just to understand and experiment with the internals of the scheme and try experimental combinations.
+This repo is an ongoing implementation; the code will stay rough and unoptimized for a while, and it is not meant to be used, only to understand and experiment with the internals of the scheme and to try experimental combinations.
 
 Thanks to [levs57](https://twitter.com/levs57) for clarifications on the Nova folding.
diff --git a/src/nifs.rs b/src/nifs.rs
index 4707e84..fd8f9a3 100644
--- a/src/nifs.rs
+++ b/src/nifs.rs
@@ -2,16 +2,16 @@ use ark_ec::AffineRepr;
 use ark_std::ops::Add;
 use std::marker::PhantomData;
 
-use crate::pedersen::Commitment;
+use crate::pedersen::{Commitment, CommitmentVec};
 use crate::r1cs::*;
 use crate::transcript::Transcript;
 use crate::utils::*;
 
 // Phi: φ in the paper (later 𝖴), a folded instance
 pub struct Phi<C: AffineRepr> {
-    cmE: Commitment<C>,
+    cmE: Commitment<C>, // TODO not a Commitment but directly a C point (without rE)
     u: C::ScalarField,
-    cmW: Commitment<C>,
+    cmW: Commitment<C>, // TODO not a Commitment but directly a C point (without rW)
     x: Vec<C::ScalarField>,
 }
 
@@ -23,25 +23,56 @@ pub struct FWit<C: AffineRepr> {
     rW: C::ScalarField,
 }
 
+impl<C: AffineRepr> FWit<C> {
+    pub fn commit(&self) -> Phi<C> {
+        unimplemented!();
+    }
+}
+
 pub struct NIFS<C: AffineRepr> {
     _phantom: PhantomData<C>,
 }
 
 impl<C: AffineRepr> NIFS<C> {
+    pub fn comp_T(
+        cs1: RelaxedR1CS<C::ScalarField>,
+        cs2: RelaxedR1CS<C::ScalarField>,
+        z1: &Vec<C::ScalarField>,
+        z2: &Vec<C::ScalarField>,
+    ) -> Vec<C::ScalarField> {
+        // assuming cs1.ABC == cs2.ABC (both instances share the same R1CS matrices)
+        let (A, B, C) = (cs1.ABC.A, cs1.ABC.B, cs1.ABC.C);
+
+        // this is parallelizable (for the future)
+        let Az1 = matrix_vector_product(&A, &z1);
+        let Bz1 = matrix_vector_product(&B, &z1);
+        let Az1_Bz1 = hadamard_product(Az1, Bz1);
+        let Az2 = matrix_vector_product(&A, &z2);
+        let Bz2 = matrix_vector_product(&B, &z2);
+        let Az2_Bz2 = hadamard_product(Az2, Bz2);
+        let Cz2 = matrix_vector_product(&C, &z2);
+        let Cz1 = matrix_vector_product(&C, &z1);
+        let u1Cz2 = vector_elem_product(&Cz2, &cs1.u);
+        let u2Cz1 = vector_elem_product(&Cz1, &cs2.u);
+        // this will get simplified with future operators from the Add trait
+        let T = vec_sub(vec_sub(vec_add(Az1_Bz1, Az2_Bz2), u1Cz2), u2Cz1); // T = Az1∘Bz1 + Az2∘Bz2 - u1·Cz2 - u2·Cz1
+        T
+    }
+
     pub fn fold_witness(
         r: C::ScalarField,
-        fw1: FWit<C>,
-        fw2: FWit<C>,
+        fw1: &FWit<C>,
+        fw2: &FWit<C>,
         T: Vec<C::ScalarField>,
     ) -> FWit<C> {
         let r2 = r * r;
         let E: Vec<C::ScalarField> = vec_add(
             // TODO this syntax will be simplified with future operators impl
-            vec_add(fw1.E, vector_elem_product(&T, &r)),
+            vec_add(fw1.E.clone(), vector_elem_product(&T, &r)),
             vector_elem_product(&fw2.E, &r2),
         );
         let rE = fw1.rE + r * fw2.rE;
-        let W = vec_add(fw1.W, vector_elem_product(&fw2.W, &r));
+        let W = vec_add(fw1.W.clone(), vector_elem_product(&fw2.W, &r));
         let rW = fw1.rW + r * fw2.rW;
         FWit::<C> {
             E: E.into(),
@@ -55,7 +86,7 @@ impl<C: AffineRepr> NIFS<C> {
         r: C::ScalarField,
         phi1: Phi<C>,
         phi2: Phi<C>,
-        cmT: Commitment<C>,
+        cmT: CommitmentVec<C>,
     ) -> Phi<C> {
         let r2 = r * r;
 
@@ -78,3 +109,81 @@ impl<C: AffineRepr> NIFS<C> {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::pedersen::Pedersen;
+    use ark_bn254::{g1::G1Affine, Fr};
+    use ark_ec::CurveGroup;
+    use ark_std::{
+        rand::{Rng, RngCore},
+        UniformRand,
+    };
+    use ark_std::{One, Zero};
+    use std::ops::Mul;
+
+    #[test]
+    fn test_simple_folding() {
+        let mut rng = ark_std::test_rng();
+
+        // R1CS for: x^3 + x + 5 = y
+        let A = to_F_matrix::<Fr>(vec![
+            vec![0, 1, 0, 0, 0, 0],
+            vec![0, 0, 0, 1, 0, 0],
+            vec![0, 1, 0, 0, 1, 0],
+            vec![5, 0, 0, 0, 0, 1],
+        ]);
+        let B = to_F_matrix::<Fr>(vec![
+            vec![0, 1, 0, 0, 0, 0],
+            vec![0, 1, 0, 0, 0, 0],
+            vec![1, 0, 0, 0, 0, 0],
+            vec![1, 0, 0, 0, 0, 0],
+        ]);
+        let C = to_F_matrix::<Fr>(vec![
+            vec![0, 0, 0, 1, 0, 0],
+            vec![0, 0, 0, 0, 1, 0],
+            vec![0, 0, 0, 0, 0, 1],
+            vec![0, 0, 1, 0, 0, 0],
+        ]);
+        let z1 = to_F_vec::<Fr>(vec![1, 3, 35, 9, 27, 30]);
+        let z2 = to_F_vec::<Fr>(vec![1, 4, 73, 16, 64, 68]);
+
+        let relaxed_r1cs_1 = R1CS::<Fr> {
+            A: A.clone(),
+            B: B.clone(),
+            C: C.clone(),
+        }
+        .relax();
+        let relaxed_r1cs_2 = R1CS::<Fr> { A, B, C }.relax();
+
+        let T = NIFS::<G1Affine>::comp_T(relaxed_r1cs_1, relaxed_r1cs_2, &z1, &z2);
+        let params = Pedersen::<G1Affine>::new_params(&mut rng);
+        let cmT = Pedersen::commit_vec(&mut rng, &params, &T);
+
+        let r = Fr::rand(&mut rng); // this would come from the transcript
+
+        // WIP TMP
+        let fw1 = FWit::<G1Affine> {
+            E: vec![Fr::zero(); T.len()],
+            rE: Fr::zero(),
+            W: z1,
+            rW: Fr::zero(),
+        };
+        let fw2 = FWit::<G1Affine> {
+            E: vec![Fr::zero(); T.len()],
+            rE: Fr::zero(),
+            W: z2,
+            rW: Fr::zero(),
+        };
+
+        // fold witness
+        let folded_witness = NIFS::<G1Affine>::fold_witness(r, &fw1, &fw2, T);
+        let phi1 = fw1.commit(); // <- unimplemented
+        let phi2 = fw2.commit();
+        // fold instance
+        let folded_instance = NIFS::<G1Affine>::fold_instance(r, phi1, phi2, cmT);
+        // naive check of the R1CS relation on the folded witness
+        // assert_eq!(hadamard_product(Az, Bz), vec_add(vector_elem_product(Cz, u), E));
+    }
+}
diff --git a/src/pedersen.rs b/src/pedersen.rs
index 2092ed1..202934e 100644
--- a/src/pedersen.rs
+++ b/src/pedersen.rs
@@ -1,8 +1,12 @@
 use ark_ec::AffineRepr;
-use ark_std::{rand::RngCore, UniformRand};
+use ark_std::{
+    rand::{Rng, RngCore},
+    UniformRand,
+};
 use std::marker::PhantomData;
 
 use crate::transcript::Transcript;
+use crate::utils::naive_msm;
 
 pub struct Proof<C: AffineRepr> {
     R: C,
@@ -20,15 +24,35 @@ pub struct Pedersen<C: AffineRepr> {
 }
 
 impl<C: AffineRepr> Pedersen<C> {
-    pub fn commit<R: RngCore>(
+    pub fn new_params<R: Rng>(rng: &mut R) -> Params<C> {
+        let h_scalar = C::ScalarField::rand(rng);
+        let g: C = C::generator();
+        let params: Params<C> = Params::<C> {
+            g,
+            h: g.mul(h_scalar).into(),
+        };
+        params
+    }
+
+    pub fn commit_elem<R: RngCore>(
         rng: &mut R,
         params: &Params<C>,
         v: &C::ScalarField,
-    ) -> (C, C::ScalarField) {
+    ) -> Commitment<C> {
         let r = C::ScalarField::rand(rng);
         let cm: C = (params.g.mul(v) + params.h.mul(r)).into();
-        (cm, r)
+        Commitment::<C> { cm, r }
+    }
+    pub fn commit_vec<R: RngCore>(
+        rng: &mut R,
+        params: &Params<C>,
+        v: &Vec<C::ScalarField>,
+    ) -> CommitmentVec<C> {
+        let r: Vec<C> = vec![C::rand(rng); v.len()]; // wip
+        let cm = naive_msm(v, &r);
+        CommitmentVec::<C> { cm, r }
     }
+
     pub fn prove(
         params: &Params<C>,
         transcript: &mut Transcript<C::ScalarField>,
@@ -73,13 +97,18 @@ impl<C: AffineRepr> Pedersen<C> {
     }
 }
 
+pub struct CommitmentVec<C: AffineRepr> {
+    // WIP
+    pub cm: C,
+    pub r: Vec<C>,
+}
 pub struct Commitment<C: AffineRepr> {
     pub cm: C,
     pub r: C::ScalarField,
 }
 impl<C: AffineRepr> Commitment<C> {
     pub fn prove(
-        self,
+        &self,
         params: &Params<C>,
         transcript: &mut Transcript<C::ScalarField>,
         v: C::ScalarField,
@@ -100,12 +129,7 @@ mod tests {
         let mut rng = ark_std::test_rng();
 
         // setup params
-        let h_scalar = Fr::rand(&mut rng);
-        let g: G1Affine = G1Affine::generator();
-        let params: Params<G1Affine> = Params::<G1Affine> {
-            g,
-            h: g.mul(h_scalar).into_affine(),
-        };
+        let params = Pedersen::<G1Affine>::new_params(&mut rng);
 
         // init Prover's transcript
         let mut transcript_p: Transcript<Fr> = Transcript::<Fr>::new();
@@ -114,9 +138,11 @@ mod tests {
 
         let v = Fr::rand(&mut rng);
 
-        let (cm, r) = Pedersen::commit(&mut rng, &params, &v);
-        let proof = Pedersen::prove(&params, &mut transcript_p, cm, v, r);
-        let v = Pedersen::verify(&params, &mut transcript_v, cm, proof);
+        let cm = Pedersen::commit_elem(&mut rng, &params, &v);
+        let proof = cm.prove(&params, &mut transcript_p, v);
+        // can also use:
+        // let proof = Pedersen::prove(&params, &mut transcript_p, cm.cm, v, cm.r);
+        let v = Pedersen::verify(&params, &mut transcript_v, cm.cm, proof);
         assert!(v);
     }
 }
diff --git a/src/utils.rs b/src/utils.rs
index c167f52..903fe27 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,3 +1,4 @@
+use ark_ec::AffineRepr;
 use ark_ff::fields::PrimeField;
 
 use core::ops::Add;
@@ -29,6 +30,16 @@ pub fn hadamard_product<F: PrimeField>(a: Vec<F>, b: Vec<F>) -> Vec<F> {
     r
 }
 
+pub fn naive_msm<C: AffineRepr>(s: &Vec<C::ScalarField>, p: &Vec<C>) -> C {
+    // TODO check that s and p have the same length
+
+    let mut r = p[0].mul(s[0]);
+    for i in 1..s.len() {
+        r = r + p[i].mul(s[i]);
+    }
+    r.into()
+}
+
 pub fn vec_add<F: PrimeField>(a: Vec<F>, b: Vec<F>) -> Vec<F> {
     let mut r: Vec<F> = vec![F::zero(); a.len()];
     for i in 0..a.len() {
@@ -55,6 +66,24 @@ pub fn vec_sub<F: PrimeField>(a: Vec<F>, b: Vec<F>) -> Vec<F> {
     r
 }
 
+pub fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {
+    let mut R: Vec<Vec<F>> = vec![Vec::new(); M.len()];
+    for i in 0..M.len() {
+        R[i] = vec![F::zero(); M[i].len()];
+        for j in 0..M[i].len() {
+            R[i][j] = F::from(M[i][j] as u64);
+        }
+    }
+    R
+}
+pub fn to_F_vec<F: PrimeField>(z: Vec<usize>) -> Vec<F> {
+    let mut r: Vec<F> = vec![F::zero(); z.len()];
+    for i in 0..z.len() {
+        r[i] = F::from(z[i] as u64);
+    }
+    r
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -63,24 +92,6 @@ mod tests {
     use ark_std::{One, Zero};
     use std::ops::Mul;
 
-    fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {
-        let mut R: Vec<Vec<F>> = vec![Vec::new(); M.len()];
-        for i in 0..M.len() {
-            R[i] = vec![F::zero(); M[i].len()];
-            for j in 0..M[i].len() {
-                R[i][j] = F::from(M[i][j] as u64);
-            }
-        }
-        R
-    }
-    fn to_F_vec<F: PrimeField>(z: Vec<usize>) -> Vec<F> {
-        let mut r: Vec<F> = vec![F::zero(); z.len()];
-        for i in 0..z.len() {
-            r[i] = F::from(z[i] as u64);
-        }
-        r
-    }
-
     #[test]
     fn test_matrix_vector_product() {
         let A = to_F_matrix::<Fr>(vec![
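
Note on the commented-out check at the end of test_simple_folding: the relation referred to there is the relaxed R1CS relation Az ∘ Bz = u·Cz + E. A minimal sketch of that check follows; it is not part of the patch, check_relaxed_r1cs is a hypothetical name not defined anywhere in the repo, and it only reuses the vector helpers from src/utils.rs, called the same way NIFS::comp_T calls them.

#[allow(non_snake_case)]
fn check_relaxed_r1cs<F: ark_ff::PrimeField>(
    A: Vec<Vec<F>>,
    B: Vec<Vec<F>>,
    C: Vec<Vec<F>>,
    z: Vec<F>,
    u: F,
    E: Vec<F>,
) -> bool {
    // sketch only: hypothetical helper, assumes the src/utils.rs functions are in scope
    let Az = matrix_vector_product(&A, &z);
    let Bz = matrix_vector_product(&B, &z);
    let Cz = matrix_vector_product(&C, &z);
    // relaxed R1CS relation: Az ∘ Bz == u·Cz + E
    hadamard_product(Az, Bz) == vec_add(vector_elem_product(&Cz, &u), E)
}

With u = 1 and E = 0 this reduces to the plain R1CS check that z1 and z2 in the test are expected to satisfy; after folding, the folded witness would be checked against the folded u and E instead.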