commit 69c6b262058dde0e53a30142b2d43e98cf83aa0d
Author: Daniel Tehrani
Date: Fri Jul 28 12:22:51 2023 -0700

    Init commit!

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..96ef6c0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/target
+Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..8df25a5
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,5 @@
+[workspace]
+members = [
+    "tensor_pcs",
+    "shockwave_plus"
+]
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1e485a5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,36 @@
+# Shockwave+
+
+## Overview
+
+Shockwave is a variant of [Brakedown](https://eprint.iacr.org/2021/1043) that uses a Reed-Solomon code instead of a linear-time encodable code. **Shockwave+** is an extension of Shockwave that works over all finite fields by using [ECFFT](https://arxiv.org/pdf/2107.08473.pdf) instead of the FFT for low-degree extension of polynomial evaluations.
+
+Brakedown has a linear-time prover and is *field-agnostic* (i.e. it works over all finite fields), but its proofs are concretely larger than Shockwave’s.
+
+Shockwave provides shorter proofs and lower verification time, but requires an FFT-friendly field to achieve $O(n\log{n})$ proving time.
+
+Shockwave+ inherits the smaller proofs of Shockwave and is also *field-agnostic*. It uses the EXTEND operation from [ECFFT](https://arxiv.org/pdf/2107.08473.pdf) to run Reed-Solomon encoding in $O(n\log{n})$ time.
+
+**Crates**
+[shockwave_plus](/shockwave_plus/) contains the prover/verifier for a zero-knowledge proof of R1CS satisfiability. It’s based on the PIOP from [Spartan](https://eprint.iacr.org/2019/550.pdf), and uses the multilinear polynomial commitment scheme implemented in [tensor_pcs](/tensor_pcs/).
+
+**Zero-Knowledge**
+
+We use the zero-knowledge sum-check protocol from [Libra](https://eprint.iacr.org/2019/317.pdf) to transform the Spartan PIOP into a zero-knowledge PIOP, and a technique from [BCG+17](https://eprint.iacr.org/2017/872.pdf) to make the polynomial commitment scheme zero-knowledge.
+
+
+
+The EXTEND operation is implemented in a separate crate, [ecfft](https://github.com/DanTehrani/ecfft), and is used in [tensor_pcs](/tensor_pcs/).
+
+## Benchmarks
+
+TBD
+
+## Future work
+
+- [ ] Support richer frontends (CCS, PLONKish).
+- [ ] Employ *self-recursion* techniques from [Vortex](https://eprint.iacr.org/2022/1633.pdf)/[Orion](https://eprint.iacr.org/2022/1010.pdf) to make the proofs smaller.
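+## Usage
+
+A minimal sketch of proving and verifying R1CS satisfiability, adapted from the test in [shockwave_plus/src/lib.rs](/shockwave_plus/src/lib.rs); the instance size and the parameters `l` and `num_rows` are illustrative, not tuned:
+
+```rust
+use shockwave_plus::{ShockwavePlus, Transcript, R1CS};
+
+type F = halo2curves::secp256k1::Fp;
+
+fn main() {
+    // A synthetic R1CS instance with 2^6 constraints and a satisfying witness.
+    let (r1cs, witness) = R1CS::<F>::produce_synthetic_r1cs(64, 64, 0);
+
+    // l: number of column openings made by the PCS;
+    // num_rows: number of rows of the coded evaluation matrix.
+    let shockwave_plus = ShockwavePlus::new(r1cs, 10, 4);
+
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, _) = shockwave_plus.prove(&witness, &mut prover_transcript);
+
+    // verify_partial panics if any check fails.
+    let mut verifier_transcript = Transcript::new(b"example");
+    shockwave_plus.verify_partial(&proof, &mut verifier_transcript);
+}
+```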
+ +## Run tests +```bash +cargo test +``` \ No newline at end of file diff --git a/shockwave_plus/Cargo.toml b/shockwave_plus/Cargo.toml new file mode 100644 index 0000000..c962a1b --- /dev/null +++ b/shockwave_plus/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "shockwave-plus" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ark-std = "0.4.0" +bincode = "1.3.3" +halo2curves = { version = "0.1.0", features = ["derive_serde"] } +rand = "0.8.5" +tensor-pcs = { path = "../tensor_pcs" } +serde = { version = "1.0.152", features = ["derive"] } + +[dev-dependencies] +criterion = { version = "0.4", features = ["html_reports"] } + +[[bench]] +name = "prove" +harness = false + +[features] +print-trace = ["ark-std/print-trace"] diff --git a/shockwave_plus/benches/prove.rs b/shockwave_plus/benches/prove.rs new file mode 100644 index 0000000..c9cc900 --- /dev/null +++ b/shockwave_plus/benches/prove.rs @@ -0,0 +1,41 @@ +#![allow(non_snake_case)] +use criterion::{criterion_group, criterion_main, Criterion}; +use shockwave_plus::ShockwavePlus; +use shockwave_plus::R1CS; +use tensor_pcs::Transcript; + +fn shockwave_plus_bench(c: &mut Criterion) { + type F = halo2curves::secp256k1::Fp; + + for exp in [12, 15, 18] { + let num_cons = 2usize.pow(exp); + let num_vars = num_cons; + let num_input = 0; + + let (r1cs, witness) = R1CS::::produce_synthetic_r1cs(num_cons, num_vars, num_input); + + let mut group = c.benchmark_group(format!("ShockwavePlus num_cons: {}", num_cons)); + let l = 319; + let num_rows = (((2f64 / l as f64).sqrt() * (num_vars as f64).sqrt()) as usize) + .next_power_of_two() + / 2; + let ShockwavePlus = ShockwavePlus::new(r1cs.clone(), l, num_rows); + group.bench_function("prove", |b| { + b.iter(|| { + let mut transcript = Transcript::new(b"bench"); + ShockwavePlus.prove(&witness, &mut transcript); + }) + }); + } +} + +fn set_duration() -> Criterion { + Criterion::default().sample_size(10) +} + +criterion_group! 
{
    name = benches;
    config = set_duration();
    targets = shockwave_plus_bench
}
criterion_main!(benches);
diff --git a/shockwave_plus/src/lib.rs b/shockwave_plus/src/lib.rs
new file mode 100644
index 0000000..2a031a5
--- /dev/null
+++ b/shockwave_plus/src/lib.rs
@@ -0,0 +1,272 @@
+#![allow(non_snake_case)]
+mod polynomial;
+mod r1cs;
+mod sumcheck;
+mod utils;
+
+use ark_std::{end_timer, start_timer};
+use serde::{Deserialize, Serialize};
+use sumcheck::{SCPhase1Proof, SCPhase2Proof, SumCheckPhase1, SumCheckPhase2};
+
+// Exports
+pub use r1cs::R1CS;
+pub use tensor_pcs::*;
+
+#[derive(Serialize, Deserialize)]
+pub struct PartialSpartanProof<F: FieldExt> {
+    pub z_comm: [u8; 32],
+    pub sc_proof_1: SCPhase1Proof<F>,
+    pub sc_proof_2: SCPhase2Proof<F>,
+    pub z_eval_proof: TensorMLOpening<F>,
+    pub v_A: F,
+    pub v_B: F,
+    pub v_C: F,
+}
+
+pub struct FullSpartanProof<F: FieldExt> {
+    pub partial_proof: PartialSpartanProof<F>,
+    pub A_eval_proof: TensorMLOpening<F>,
+    pub B_eval_proof: TensorMLOpening<F>,
+    pub C_eval_proof: TensorMLOpening<F>,
+}
+
+pub struct ShockwavePlus<F: FieldExt> {
+    pub r1cs: R1CS<F>,
+    pub pcs_witness: TensorMultilinearPCS<F>,
+}
+
+impl<F: FieldExt> ShockwavePlus<F> {
+    pub fn new(r1cs: R1CS<F>, l: usize, num_rows: usize) -> Self {
+        let num_cols = r1cs.num_vars / num_rows;
+
+        // Make sure that there are enough columns to run the l queries
+        assert!(num_cols > l);
+
+        let expansion_factor = 2;
+
+        let ecfft_config = rs_config::ecfft::gen_config(num_cols);
+
+        let pcs_config = TensorRSMultilinearPCSConfig::<F> {
+            expansion_factor,
+            domain_powers: None,
+            fft_domain: None,
+            ecfft_config: Some(ecfft_config),
+            l,
+            num_entries: r1cs.num_vars,
+            num_rows,
+        };
+
+        let pcs_witness = TensorMultilinearPCS::new(pcs_config);
+        Self { r1cs, pcs_witness }
+    }
+
+    pub fn prove(
+        &self,
+        witness: &[F],
+        transcript: &mut Transcript<F>,
+    ) -> (PartialSpartanProof<F>, Vec<F>) {
+        // Compute the multilinear extension of the witness
+        assert!(witness.len().is_power_of_two());
+        let witness_poly = SparseMLPoly::from_dense(witness.to_vec());
+
+        // Commit the witness polynomial
+        let comm_witness_timer = start_timer!(|| "Commit witness");
+        let committed_witness = self.pcs_witness.commit(&witness_poly);
+        let witness_comm = committed_witness.committed_tree.root;
+        end_timer!(comm_witness_timer);
+
+        transcript.append_bytes(&witness_comm);
+
+        // ############################
+        // Phase 1: The sum-checks
+        // ############################
+
+        let m = (self.r1cs.num_vars as f64).log2() as usize;
+        let tau = transcript.challenge_vec(m);
+        let mut tau_rev = tau.clone();
+        tau_rev.reverse();
+
+        // First, compute the multilinear extensions of the R1CS matrices,
+        // and prove that the Q_poly is a zero-polynomial.
+
+        // Q_poly is a zero-polynomial iff F_io evaluates to zero
+        // over the m-dimensional boolean hypercube.
+
+        // We prove this using the sum-check protocol.
+
+        // G_poly = A_poly * B_poly - C_poly
+
+        let num_rows = self.r1cs.num_cons;
+        let Az_poly = self.r1cs.A.mul_vector(num_rows, witness);
+        let Bz_poly = self.r1cs.B.mul_vector(num_rows, witness);
+        let Cz_poly = self.r1cs.C.mul_vector(num_rows, witness);
+
+        // Prove that the polynomial
+        //   Q(t) = \sum_{x \in {0, 1}^m} (Az_poly(x) * Bz_poly(x) - Cz_poly(x)) * eq(t, x)
+        // is a zero-polynomial using the sum-check protocol.
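+        // Concretely: the R1CS is satisfied iff Az ∘ Bz - Cz vanishes on the
+        // whole boolean hypercube, i.e. iff Q(t) is the zero-polynomial, and a
+        // nonzero Q(t) is caught with high probability by checking Q(tau) = 0
+        // at the random point tau. The sum-check below reduces the claim
+        // "Q(tau) = 0" to single evaluations of Az_poly, Bz_poly, Cz_poly,
+        // and eq(tau, .) at the random point rx.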
+
+        let rx = transcript.challenge_vec(m);
+        let mut rx_rev = rx.clone();
+        rx_rev.reverse();
+
+        let sc_phase_1_timer = start_timer!(|| "Sumcheck phase 1");
+
+        let sc_phase_1 = SumCheckPhase1::new(
+            Az_poly.clone(),
+            Bz_poly.clone(),
+            Cz_poly.clone(),
+            tau_rev.clone(),
+            rx.clone(),
+        );
+        let (sc_proof_1, (v_A, v_B, v_C)) = sc_phase_1.prove(transcript);
+        end_timer!(sc_phase_1_timer);
+
+        transcript.append_fe(&v_A);
+        transcript.append_fe(&v_B);
+        transcript.append_fe(&v_C);
+
+        // Phase 2
+        let r = transcript.challenge_vec(3);
+
+        // T_2 should equal the evaluation of the random linear combination of the polynomials
+
+        let ry = transcript.challenge_vec(m);
+        let sc_phase_2_timer = start_timer!(|| "Sumcheck phase 2");
+        let sc_phase_2 = SumCheckPhase2::new(
+            self.r1cs.A.clone(),
+            self.r1cs.B.clone(),
+            self.r1cs.C.clone(),
+            witness.to_vec(),
+            rx.clone(),
+            r.as_slice().try_into().unwrap(),
+            ry.clone(),
+        );
+
+        let sc_proof_2 = sc_phase_2.prove(transcript);
+        end_timer!(sc_phase_2_timer);
+
+        let mut ry_rev = ry.clone();
+        ry_rev.reverse();
+        let z_open_timer = start_timer!(|| "Open witness poly");
+        // Prove the evaluation of the polynomial Z(y) at ry
+        let z_eval_proof =
+            self.pcs_witness
+                .open(&committed_witness, &witness_poly, &ry_rev, transcript);
+        end_timer!(z_open_timer);
+
+        // Prove the evaluation of the polynomials A(y), B(y), C(y) at ry
+
+        let rx_ry = vec![ry_rev, rx_rev].concat();
+        (
+            PartialSpartanProof {
+                z_comm: witness_comm,
+                sc_proof_1,
+                sc_proof_2,
+                z_eval_proof,
+                v_A,
+                v_B,
+                v_C,
+            },
+            rx_ry,
+        )
+    }
+
+    pub fn verify_partial(
+        &self,
+        partial_proof: &PartialSpartanProof<F>,
+        transcript: &mut Transcript<F>,
+    ) {
+        partial_proof.z_comm.append_to_transcript(transcript);
+
+        let A_mle = self.r1cs.A.to_ml_extension();
+        let B_mle = self.r1cs.B.to_ml_extension();
+        let C_mle = self.r1cs.C.to_ml_extension();
+
+        let m = (self.r1cs.num_vars as f64).log2() as usize;
+        let tau = transcript.challenge_vec(m);
+        let rx = transcript.challenge_vec(m);
+        let mut rx_rev = rx.clone();
+        rx_rev.reverse();
+
+        transcript.append_fe(&partial_proof.sc_proof_1.blinder_poly_sum);
+        let rho = transcript.challenge_fe();
+
+        let ex = SumCheckPhase1::verify_round_polys(&partial_proof.sc_proof_1, &rx, rho);
+
+        // The final eval should equal
+        //   (v_A * v_B - v_C) * eq(tau, rx) + rho * blinder_poly_eval_claim
+        let v_A = partial_proof.v_A;
+        let v_B = partial_proof.v_B;
+        let v_C = partial_proof.v_C;
+
+        let T_1_eq = EqPoly::new(tau);
+        let T_1 = (v_A * v_B - v_C) * T_1_eq.eval(&rx_rev)
+            + rho * partial_proof.sc_proof_1.blinder_poly_eval_claim;
+        assert_eq!(T_1, ex);
+
+        transcript.append_fe(&v_A);
+        transcript.append_fe(&v_B);
+        transcript.append_fe(&v_C);
+
+        let r = transcript.challenge_vec(3);
+        let r_A = r[0];
+        let r_B = r[1];
+        let r_C = r[2];
+
+        let ry = transcript.challenge_vec(m);
+
+        transcript.append_fe(&partial_proof.sc_proof_2.blinder_poly_sum);
+        let rho_2 = transcript.challenge_fe();
+
+        let T_2 =
+            (r_A * v_A + r_B * v_B + r_C * v_C) + rho_2 * partial_proof.sc_proof_2.blinder_poly_sum;
+        let final_poly_eval =
+            SumCheckPhase2::verify_round_polys(T_2, &partial_proof.sc_proof_2, &ry);
+
+        let mut ry_rev = ry.clone();
+        ry_rev.reverse();
+
+        let rx_ry = [rx, ry].concat();
+        assert_eq!(partial_proof.z_eval_proof.x, ry_rev);
+
+        let z_eval = partial_proof.z_eval_proof.y;
+        let A_eval = A_mle.eval(&rx_ry);
+        let B_eval = B_mle.eval(&rx_ry);
+        let C_eval = C_mle.eval(&rx_ry);
+
+        self.pcs_witness.verify(
+            &partial_proof.z_eval_proof,
+            &partial_proof.z_comm,
+            transcript,
+        );
+
+        let T_opened = (r_A * A_eval + r_B * B_eval + r_C *
C_eval) * z_eval + + rho_2 * partial_proof.sc_proof_2.blinder_poly_eval_claim; + assert_eq!(T_opened, final_poly_eval); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_shockwave_plus() { + type F = halo2curves::secp256k1::Fp; + + let num_cons = 2usize.pow(6); + let num_vars = num_cons; + let num_input = 0; + let l = 10; + + let (r1cs, witness) = R1CS::::produce_synthetic_r1cs(num_cons, num_vars, num_input); + + let num_rows = 4; + let ShockwavePlus = ShockwavePlus::new(r1cs.clone(), l, num_rows); + let mut prover_transcript = Transcript::new(b"bench"); + let (partial_proof, _) = ShockwavePlus.prove(&witness, &mut prover_transcript); + + let mut verifier_transcript = Transcript::new(b"bench"); + ShockwavePlus.verify_partial(&partial_proof, &mut verifier_transcript); + } +} diff --git a/shockwave_plus/src/polynomial/blinder_poly.rs b/shockwave_plus/src/polynomial/blinder_poly.rs new file mode 100644 index 0000000..fcd7560 --- /dev/null +++ b/shockwave_plus/src/polynomial/blinder_poly.rs @@ -0,0 +1,34 @@ +use crate::FieldExt; + +pub struct BlinderPoly { + inner_poly_coeffs: Vec>, +} + +impl BlinderPoly { + pub fn sample_random(num_vars: usize, degree: usize) -> Self { + let mut rng = rand::thread_rng(); + let inner_poly_coeffs = (0..num_vars) + .map(|_| (0..(degree + 1)).map(|_| F::random(&mut rng)).collect()) + .collect(); + + Self { inner_poly_coeffs } + } + + pub fn eval(&self, x: &[F]) -> F { + let mut res = F::ZERO; + + for (coeffs, x_i) in self.inner_poly_coeffs.iter().zip(x.iter()) { + let mut tmp = F::ZERO; + let mut x_i_pow = F::ONE; + + for coeff in coeffs.iter() { + tmp += *coeff * x_i_pow; + x_i_pow *= x_i; + } + + res += tmp; + } + + res + } +} diff --git a/shockwave_plus/src/polynomial/ml_poly.rs b/shockwave_plus/src/polynomial/ml_poly.rs new file mode 100644 index 0000000..2cf32be --- /dev/null +++ b/shockwave_plus/src/polynomial/ml_poly.rs @@ -0,0 +1,39 @@ +use tensor_pcs::EqPoly; + +use crate::FieldExt; + +#[derive(Clone, Debug)] +pub struct MlPoly { + pub evals: Vec, + pub num_vars: usize, +} + +impl MlPoly { + pub fn new(evals: Vec) -> Self { + assert!(evals.len().is_power_of_two()); + let num_vars = (evals.len() as f64).log2() as usize; + Self { evals, num_vars } + } + + fn dot_prod(x: &[F], y: &[F]) -> F { + assert_eq!(x.len(), y.len()); + let mut result = F::ZERO; + for i in 0..x.len() { + result += x[i] * y[i]; + } + result + } + + // Evaluate the multilinear extension of the polynomial `a`, at point `t`. + // `a` is in evaluation form. 
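+    // Concretely, the multilinear extension is
+    //   f(t) = \sum_{b \in {0,1}^m} evals[b] * eq(t, b),
+    // so evaluating at `t` reduces to a dot product between the evaluation
+    // table and the table of eq(t, b) values over the boolean hypercube.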
+    pub fn eval(&self, t: &[F]) -> F {
+        let n = self.evals.len();
+        debug_assert_eq!((n as f64).log2() as usize, t.len());
+        // Evaluate the multilinear extension of the polynomial `a`,
+        // over the boolean hypercube
+
+        let eq_evals = EqPoly::new(t.to_vec()).evals();
+
+        Self::dot_prod(&self.evals, &eq_evals)
+    }
+}
diff --git a/shockwave_plus/src/polynomial/mod.rs b/shockwave_plus/src/polynomial/mod.rs
new file mode 100644
index 0000000..afb6a6d
--- /dev/null
+++ b/shockwave_plus/src/polynomial/mod.rs
@@ -0,0 +1,2 @@
+pub mod blinder_poly;
+pub mod ml_poly;
diff --git a/shockwave_plus/src/r1cs/mod.rs b/shockwave_plus/src/r1cs/mod.rs
new file mode 100644
index 0000000..4e9c9e8
--- /dev/null
+++ b/shockwave_plus/src/r1cs/mod.rs
@@ -0,0 +1,3 @@
+pub mod r1cs;
+
+pub use r1cs::R1CS;
diff --git a/shockwave_plus/src/r1cs/r1cs.rs b/shockwave_plus/src/r1cs/r1cs.rs
new file mode 100644
index 0000000..d25e6b2
--- /dev/null
+++ b/shockwave_plus/src/r1cs/r1cs.rs
@@ -0,0 +1,306 @@
+use crate::FieldExt;
+use halo2curves::ff::Field;
+use tensor_pcs::SparseMLPoly;
+
+#[derive(Clone)]
+pub struct SparseMatrixEntry<F> {
+    pub row: usize,
+    pub col: usize,
+    pub val: F,
+}
+
+#[derive(Clone)]
+pub struct Matrix<F> {
+    pub entries: Vec<SparseMatrixEntry<F>>,
+    pub num_cols: usize,
+    pub num_rows: usize,
+}
+
+impl<F> Matrix<F>
+where
+    F: FieldExt,
+{
+    pub fn new(entries: Vec<SparseMatrixEntry<F>>, num_cols: usize, num_rows: usize) -> Self {
+        assert!((num_cols * num_rows).is_power_of_two());
+        Self {
+            entries,
+            num_cols,
+            num_rows,
+        }
+    }
+
+    pub fn mul_vector(&self, num_rows: usize, vec: &[F]) -> Vec<F> {
+        let mut result = vec![F::ZERO; num_rows];
+        let entries = &self.entries;
+        for i in 0..entries.len() {
+            let row = entries[i].row;
+            let col = entries[i].col;
+            let val = entries[i].val;
+            result[row] += val * vec[col];
+        }
+        result
+    }
+
+    // Return the multilinear extension of the matrix,
+    // a polynomial in log2(num_rows * num_cols) variables
+    pub fn to_ml_extension(&self) -> SparseMLPoly<F> {
+        let mut evals = Vec::with_capacity(self.entries.len());
+        let entries = &self.entries;
+        let num_cols = self.num_cols;
+        for i in 0..entries.len() {
+            let row = entries[i].row;
+            let col = entries[i].col;
+            let val = entries[i].val;
+            evals.push(((row * num_cols) + col, val));
+        }
+        let ml_poly_num_vars = ((self.num_cols * self.num_rows) as f64).log2() as usize;
+        let ml_poly = SparseMLPoly::new(evals, ml_poly_num_vars);
+        ml_poly
+    }
+
+    /*
+    pub fn fast_to_coeffs(&self, s: usize, x: F) -> Vec<F> {
+        let mut result = F::ZERO;
+        for entry in &self.0 {
+            let row = entry.0;
+            let col = entry.1;
+            let val = entry.2;
+
+            let index = row * 2usize.pow(s as u32) + col;
+            // Get the degrees of the nonzero coefficients
+            // Tensor product (1 - x_0)(1 - x_1)
+            let base = index;
+            let zero_bits = degree & !base;
+
+            let mut zero_bit_degrees = vec![];
+            for j in 0..s {
+                if zero_bits & (1 << j) != 0 {
+                    zero_bit_degrees.push(j);
+                }
+            }
+
+            let mut term = val;
+            for degree in zero_bit_degrees {
+                term *= x.pow(&[base as u64, 0, 0, 0]) - x.pow(&[(degree + base) as u64, 0, 0, 0]);
+            }
+            result += term;
+        }
+        result
+    }
+    */
+
+    /*
+    pub fn fast_uni_eval(&self, s: usize, x: F) -> F {
+        let degree = 2usize.pow(s as u32);
+
+        let mut result = F::ZERO;
+        for entry in &self.0 {
+            let row = entry.0;
+            let col = entry.1;
+            let val = entry.2;
+
+            let index = row * 2usize.pow(s as u32) + col;
+            // Get the degrees of the nonzero coefficients
+            // Tensor product (1 - x_0)(1 - x_1)
+            let base = index;
+            let zero_bits = degree & !base;
+
+            let mut zero_bit_degrees = vec![];
+            for j in 0..s {
+                if zero_bits & (1
<< j) != 0 {
+                    zero_bit_degrees.push(j);
+                }
+            }
+
+            let mut term = val;
+            for degree in zero_bit_degrees {
+                term *= x.pow(&[base as u64, 0, 0, 0]) - x.pow(&[(degree + base) as u64, 0, 0, 0]);
+            }
+            result += term;
+        }
+        result
+    }
+    */
+}
+
+#[derive(Clone)]
+pub struct R1CS<F>
+where
+    F: FieldExt,
+{
+    pub A: Matrix<F>,
+    pub B: Matrix<F>,
+    pub C: Matrix<F>,
+    pub public_input: Vec<F>,
+    pub num_cons: usize,
+    pub num_vars: usize,
+    pub num_input: usize,
+}
+
+impl<F> R1CS<F>
+where
+    F: FieldExt,
+{
+    pub fn hadamard_prod(a: &[F], b: &[F]) -> Vec<F> {
+        assert_eq!(a.len(), b.len());
+        let mut result = vec![F::ZERO; a.len()];
+        for i in 0..a.len() {
+            result[i] = a[i] * b[i];
+        }
+        result
+    }
+
+    pub fn produce_synthetic_r1cs(
+        num_cons: usize,
+        num_vars: usize,
+        num_input: usize,
+    ) -> (Self, Vec<F>) {
+        // assert_eq!(num_cons, num_vars);
+        let mut public_input = Vec::with_capacity(num_input);
+        let mut witness = Vec::with_capacity(num_vars);
+
+        for i in 0..num_input {
+            public_input.push(F::from((i + 1) as u64));
+        }
+
+        for i in 0..num_vars {
+            witness.push(F::from((i + 1) as u64));
+        }
+
+        let z: Vec<F> = vec![public_input.clone(), witness.clone()].concat();
+
+        let mut A_entries: Vec<SparseMatrixEntry<F>> = vec![];
+        let mut B_entries: Vec<SparseMatrixEntry<F>> = vec![];
+        let mut C_entries: Vec<SparseMatrixEntry<F>> = vec![];
+
+        for i in 0..num_cons {
+            let A_col = i % num_vars;
+            let B_col = (i + 1) % num_vars;
+            let C_col = (i + 2) % num_vars;
+
+            // For the i'th constraint,
+            // add the value 1 at the (i % num_vars)'th column of A and B,
+            // and compute the corresponding value of C so that A_i * B_i = C_i
+            // holds entry-wise in the Hadamard product Az ∘ Bz = Cz.
+
+            // We only _enable_ a single variable in each constraint.
+            A_entries.push(SparseMatrixEntry {
+                row: i,
+                col: A_col,
+                val: F::ONE,
+            });
+            B_entries.push(SparseMatrixEntry {
+                row: i,
+                col: B_col,
+                val: F::ONE,
+            });
+            C_entries.push(SparseMatrixEntry {
+                row: i,
+                col: C_col,
+                val: (z[A_col] * z[B_col]) * z[C_col].invert().unwrap(),
+            });
+        }
+
+        let A = Matrix::new(A_entries, num_vars, num_cons);
+
+        let B = Matrix::new(B_entries, num_vars, num_cons);
+
+        let C = Matrix::new(C_entries, num_vars, num_cons);
+
+        (
+            Self {
+                A,
+                B,
+                C,
+                public_input,
+                num_cons,
+                num_vars,
+                num_input,
+            },
+            witness,
+        )
+    }
+
+    pub fn is_sat(&self, witness: &Vec<F>, public_input: &Vec<F>) -> bool {
+        let mut z = Vec::with_capacity(witness.len() + public_input.len() + 1);
+        z.extend(public_input);
+        z.extend(witness);
+
+        let Az = self.A.mul_vector(self.num_cons, &z);
+        let Bz = self.B.mul_vector(self.num_cons, &z);
+        let Cz = self.C.mul_vector(self.num_cons, &z);
+
+        Self::hadamard_prod(&Az, &Bz) == Cz
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::utils::boolean_hypercube;
+
+    use super::*;
+    type F = halo2curves::secp256k1::Fp;
+    use crate::polynomial::ml_poly::MlPoly;
+
+    #[test]
+    fn test_r1cs() {
+        let num_cons = 2usize.pow(5);
+        let num_vars = num_cons;
+        let num_input = 0;
+
+        let (r1cs, mut witness) = R1CS::<F>::produce_synthetic_r1cs(num_cons, num_vars, num_input);
+
+        assert_eq!(witness.len(), num_vars);
+        assert_eq!(r1cs.public_input.len(), num_input);
+
+        assert!(r1cs.is_sat(&witness, &r1cs.public_input));
+
+        // Should fail if the witness is invalid
+        witness[0] = witness[0] + F::one();
+        assert!(r1cs.is_sat(&witness, &r1cs.public_input) == false);
+        witness[0] = witness[0] - F::one();
+
+        /*
+        // Should fail if the public input is invalid
+        let mut public_input = r1cs.public_input.clone();
+        public_input[0] = public_input[0] + F::one();
+        assert!(r1cs.is_sat(&witness,
&public_input) == false); + */ + + // Test MLE + let s = (num_vars as f64).log2() as usize; + let A_mle = r1cs.A.to_ml_extension(); + let B_mle = r1cs.B.to_ml_extension(); + let C_mle = r1cs.C.to_ml_extension(); + let Z_mle = MlPoly::new(witness); + + for c in &boolean_hypercube(s) { + let mut eval_a = F::zero(); + let mut eval_b = F::zero(); + let mut eval_c = F::zero(); + for b in &boolean_hypercube(s) { + let mut b_rev = b.clone(); + b_rev.reverse(); + let z_eval = Z_mle.eval(&b_rev); + let mut eval_matrix = [b.as_slice(), c.as_slice()].concat(); + eval_matrix.reverse(); + eval_a += A_mle.eval(&eval_matrix) * z_eval; + eval_b += B_mle.eval(&eval_matrix) * z_eval; + eval_c += C_mle.eval(&eval_matrix) * z_eval; + } + let eval_con = eval_a * eval_b - eval_c; + assert_eq!(eval_con, F::zero()); + } + } + + /* + #[test] + fn test_fast_uni_eval() { + let (r1cs, _) = R1CS::::produce_synthetic_r1cs(8, 8, 0); + + let eval_at = F::from(33); + let result = r1cs.A.fast_uni_eval(r1cs.num_vars, eval_at); + println!("result: {:?}", result); + } + */ +} diff --git a/shockwave_plus/src/sumcheck/mod.rs b/shockwave_plus/src/sumcheck/mod.rs new file mode 100644 index 0000000..45c37fc --- /dev/null +++ b/shockwave_plus/src/sumcheck/mod.rs @@ -0,0 +1,6 @@ +mod sc_phase_1; +mod sc_phase_2; +pub mod unipoly; + +pub use sc_phase_1::{SCPhase1Proof, SumCheckPhase1}; +pub use sc_phase_2::{SCPhase2Proof, SumCheckPhase2}; diff --git a/shockwave_plus/src/sumcheck/sc_phase_1.rs b/shockwave_plus/src/sumcheck/sc_phase_1.rs new file mode 100644 index 0000000..0a4e627 --- /dev/null +++ b/shockwave_plus/src/sumcheck/sc_phase_1.rs @@ -0,0 +1,175 @@ +use crate::polynomial::ml_poly::MlPoly; +use crate::sumcheck::unipoly::UniPoly; +use serde::{Deserialize, Serialize}; +use tensor_pcs::{EqPoly, Transcript}; + +use crate::FieldExt; + +#[derive(Serialize, Deserialize)] +pub struct SCPhase1Proof { + pub blinder_poly_sum: F, + pub blinder_poly_eval_claim: F, + pub round_polys: Vec>, +} + +pub struct SumCheckPhase1 { + Az_evals: Vec, + Bz_evals: Vec, + Cz_evals: Vec, + bound_eq_poly: EqPoly, + challenge: Vec, +} + +impl SumCheckPhase1 { + pub fn new( + Az_evals: Vec, + Bz_evals: Vec, + Cz_evals: Vec, + tau: Vec, + challenge: Vec, + ) -> Self { + let bound_eq_poly = EqPoly::new(tau); + Self { + Az_evals, + Bz_evals, + Cz_evals, + bound_eq_poly, + challenge, + } + } + + pub fn prove(&self, transcript: &mut Transcript) -> (SCPhase1Proof, (F, F, F)) { + let num_vars = (self.Az_evals.len() as f64).log2() as usize; + let mut round_polys = Vec::>::with_capacity(num_vars - 1); + + let mut rng = rand::thread_rng(); + // Sample a blinding polynomial g(x_1, ..., x_m) of degree 3 + let random_evals = (0..2usize.pow(num_vars as u32)) + .map(|_| F::random(&mut rng)) + .collect::>(); + let blinder_poly_sum = random_evals.iter().fold(F::ZERO, |acc, x| acc + x); + let blinder_poly = MlPoly::new(random_evals); + + transcript.append_fe(&blinder_poly_sum); + let rho = transcript.challenge_fe(); + + // Compute the sum of g(x_1, ... 
x_m) over the boolean hypercube + + // Do the sum check for f + \rho g + + let mut A_table = self.Az_evals.clone(); + let mut B_table = self.Bz_evals.clone(); + let mut C_table = self.Cz_evals.clone(); + let mut blinder_table = blinder_poly.evals.clone(); + let mut eq_table = self.bound_eq_poly.evals(); + + let zero = F::ZERO; + let one = F::ONE; + let two = F::from(2); + let three = F::from(3); + + for j in 0..num_vars { + let r_i = self.challenge[j]; + + let high_index = 2usize.pow((num_vars - j - 1) as u32); + + let mut evals = [F::ZERO; 4]; + + // https://eprint.iacr.org/2019/317.pdf#subsection.3.2 + for b in 0..high_index { + for (i, eval_at) in [zero, one, two, three].iter().enumerate() { + let a_eval = A_table[b] + (A_table[b + high_index] - A_table[b]) * eval_at; + let b_eval = B_table[b] + (B_table[b + high_index] - B_table[b]) * eval_at; + let c_eval = C_table[b] + (C_table[b + high_index] - C_table[b]) * eval_at; + let eq_eval = eq_table[b] + (eq_table[b + high_index] - eq_table[b]) * eval_at; + let blinder_eval = blinder_table[b] + + (blinder_table[b + high_index] - blinder_table[b]) * eval_at; + evals[i] += ((a_eval * b_eval - c_eval) * eq_eval) + rho * blinder_eval; + } + + A_table[b] = A_table[b] + (A_table[b + high_index] - A_table[b]) * r_i; + B_table[b] = B_table[b] + (B_table[b + high_index] - B_table[b]) * r_i; + C_table[b] = C_table[b] + (C_table[b + high_index] - C_table[b]) * r_i; + eq_table[b] = eq_table[b] + (eq_table[b + high_index] - eq_table[b]) * r_i; + blinder_table[b] = + blinder_table[b] + (blinder_table[b + high_index] - blinder_table[b]) * r_i; + } + + let round_poly = UniPoly::interpolate(&evals); + + round_polys.push(round_poly); + } + + let v_A = A_table[0]; + let v_B = B_table[0]; + let v_C = C_table[0]; + + let rx = self.challenge.clone(); + let blinder_poly_eval_claim = blinder_poly.eval(&rx); + + // Prove the evaluation of the blinder polynomial at rx. 
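+        // The claimed evaluation is sent in the proof; the verifier folds it
+        // into the final sum-check identity as
+        //   (v_A * v_B - v_C) * eq(tau, rx) + rho * blinder_poly_eval_claim.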
+ + ( + SCPhase1Proof { + blinder_poly_sum, + round_polys, + blinder_poly_eval_claim, + }, + (v_A, v_B, v_C), + ) + } + + pub fn verify_round_polys(proof: &SCPhase1Proof, challenge: &[F], rho: F) -> F { + debug_assert_eq!(proof.round_polys.len(), challenge.len()); + + let zero = F::ZERO; + let one = F::ONE; + + // target = 0 + rho * blinder_poly_sum + let mut target = rho * proof.blinder_poly_sum; + for (i, round_poly) in proof.round_polys.iter().enumerate() { + assert_eq!( + round_poly.eval(zero) + round_poly.eval(one), + target, + "round poly {} failed", + i + ); + target = round_poly.eval(challenge[i]); + } + + target + } +} + +#[cfg(test)] +mod tests { + use super::*; + use halo2curves::secp256k1::Fp; + type F = Fp; + use halo2curves::ff::Field; + + #[test] + fn test_unipoly_3() { + let coeffs = [F::from(1u64), F::from(2u64), F::from(3u64), F::from(4u64)]; + let eval_at = Fp::from(33); + + let mut expected_eval = F::ZERO; + for i in 0..coeffs.len() { + expected_eval += coeffs[i] * eval_at.pow(&[3 - i as u64, 0, 0, 0]); + } + + let mut evals = [F::ZERO; 4]; + for i in 0..4 { + let eval_at = F::from(i as u64); + let mut eval_i = F::ZERO; + for j in 0..coeffs.len() { + eval_i += coeffs[j] * eval_at.pow(&[3 - j as u64, 0, 0, 0]); + } + evals[i] = eval_i; + } + + let uni_poly = UniPoly::interpolate(&evals); + let eval = uni_poly.eval(eval_at); + assert_eq!(eval, expected_eval); + } +} diff --git a/shockwave_plus/src/sumcheck/sc_phase_2.rs b/shockwave_plus/src/sumcheck/sc_phase_2.rs new file mode 100644 index 0000000..a5c6e70 --- /dev/null +++ b/shockwave_plus/src/sumcheck/sc_phase_2.rs @@ -0,0 +1,184 @@ +use crate::polynomial::ml_poly::MlPoly; +use crate::r1cs::r1cs::Matrix; +use crate::sumcheck::unipoly::UniPoly; +use crate::FieldExt; +use serde::{Deserialize, Serialize}; +use tensor_pcs::{EqPoly, Transcript}; + +#[derive(Serialize, Deserialize)] +pub struct SCPhase2Proof { + pub round_polys: Vec>, + pub blinder_poly_sum: F, + pub blinder_poly_eval_claim: F, +} + +pub struct SumCheckPhase2 { + A_mat: Matrix, + B_mat: Matrix, + C_mat: Matrix, + Z_evals: Vec, + rx: Vec, + r: [F; 3], + challenge: Vec, +} + +impl SumCheckPhase2 { + pub fn new( + A_mat: Matrix, + B_mat: Matrix, + C_mat: Matrix, + Z_evals: Vec, + rx: Vec, + r: [F; 3], + challenge: Vec, + ) -> Self { + Self { + A_mat, + B_mat, + C_mat, + Z_evals, + rx, + r, + challenge, + } + } + + pub fn prove(&self, transcript: &mut Transcript) -> SCPhase2Proof { + let r_A = self.r[0]; + let r_B = self.r[1]; + let r_C = self.r[2]; + + let n = self.Z_evals.len(); + let num_vars = (self.Z_evals.len() as f64).log2() as usize; + + let evals_rx = EqPoly::new(self.rx.clone()).evals(); + let mut A_evals = vec![F::ZERO; n]; + let mut B_evals = vec![F::ZERO; n]; + let mut C_evals = vec![F::ZERO; n]; + + for entry in &self.A_mat.entries { + A_evals[entry.col] += evals_rx[entry.row] * entry.val; + } + for entry in &self.B_mat.entries { + B_evals[entry.col] += evals_rx[entry.row] * entry.val; + } + for entry in &self.C_mat.entries { + C_evals[entry.col] += evals_rx[entry.row] * entry.val; + } + + let mut rng = rand::thread_rng(); + // Sample a blinding polynomial g(x_1, ..., x_m) of degree 3 + let random_evals = (0..2usize.pow(num_vars as u32)) + .map(|_| F::random(&mut rng)) + .collect::>(); + let blinder_poly_sum = random_evals.iter().fold(F::ZERO, |acc, x| acc + x); + let blinder_poly = MlPoly::new(random_evals); + + transcript.append_fe(&blinder_poly_sum); + let rho = transcript.challenge_fe(); + + let mut round_polys: Vec> = 
Vec::>::with_capacity(num_vars); + + let mut A_table = A_evals.clone(); + let mut B_table = B_evals.clone(); + let mut C_table = C_evals.clone(); + let mut Z_table = self.Z_evals.clone(); + let mut blinder_table = blinder_poly.evals.clone(); + + let zero = F::ZERO; + let one = F::ONE; + let two = F::from(2); + + for j in 0..num_vars { + let high_index = 2usize.pow((num_vars - j - 1) as u32); + let mut evals = [F::ZERO; 3]; + + for b in 0..high_index { + let r_y_i = self.challenge[j]; + for (i, eval_at) in [zero, one, two].iter().enumerate() { + let a_eval = A_table[b] + (A_table[b + high_index] - A_table[b]) * eval_at; + let b_eval = B_table[b] + (B_table[b + high_index] - B_table[b]) * eval_at; + let c_eval = C_table[b] + (C_table[b + high_index] - C_table[b]) * eval_at; + let z_eval = Z_table[b] + (Z_table[b + high_index] - Z_table[b]) * eval_at; + let blinder_eval = blinder_table[b] + + (blinder_table[b + high_index] - blinder_table[b]) * eval_at; + evals[i] += + (a_eval * r_A + b_eval * r_B + c_eval * r_C) * z_eval + rho * blinder_eval; + } + + A_table[b] = A_table[b] + (A_table[b + high_index] - A_table[b]) * r_y_i; + B_table[b] = B_table[b] + (B_table[b + high_index] - B_table[b]) * r_y_i; + C_table[b] = C_table[b] + (C_table[b + high_index] - C_table[b]) * r_y_i; + Z_table[b] = Z_table[b] + (Z_table[b + high_index] - Z_table[b]) * r_y_i; + blinder_table[b] = + blinder_table[b] + (blinder_table[b + high_index] - blinder_table[b]) * r_y_i; + } + + let round_poly = UniPoly::interpolate(&evals); + round_polys.push(round_poly); + } + + let mut r_y_rev = self.challenge.clone(); + let blinder_poly_eval_claim = blinder_poly.eval(&r_y_rev); + + SCPhase2Proof { + round_polys, + blinder_poly_eval_claim, + blinder_poly_sum, + } + } + + pub fn verify_round_polys(sum_target: F, proof: &SCPhase2Proof, challenge: &[F]) -> F { + debug_assert_eq!(proof.round_polys.len(), challenge.len()); + + let zero = F::ZERO; + let one = F::ONE; + + let mut target = sum_target; + for (i, round_poly) in proof.round_polys.iter().enumerate() { + assert_eq!( + round_poly.eval(zero) + round_poly.eval(one), + target, + "i = {}", + i + ); + + target = round_poly.eval(challenge[i]); + } + + target + } +} + +#[cfg(test)] +mod tests { + use super::*; + use halo2curves::ff::Field; + use halo2curves::secp256k1::Fp; + type F = Fp; + + #[test] + fn test_unipoly_2() { + let coeffs = [F::from(1u64), F::from(2u64), F::from(3u64)]; + let eval_at = Fp::from(33); + + let mut expected_eval = F::ZERO; + for i in 0..coeffs.len() { + expected_eval += coeffs[i] * eval_at.pow(&[i as u64, 0, 0, 0]); + } + + let mut evals = [F::ZERO; 3]; + for i in 0..3 { + let eval_at = F::from(i as u64); + let mut eval_i = F::ZERO; + for j in 0..coeffs.len() { + eval_i += coeffs[j] * eval_at.pow(&[j as u64, 0, 0, 0]); + } + evals[i] = eval_i; + } + + let uni_poly = UniPoly::interpolate(&evals); + let eval = uni_poly.eval(eval_at); + assert_eq!(eval, expected_eval); + } +} diff --git a/shockwave_plus/src/sumcheck/unipoly.rs b/shockwave_plus/src/sumcheck/unipoly.rs new file mode 100644 index 0000000..30d2a5f --- /dev/null +++ b/shockwave_plus/src/sumcheck/unipoly.rs @@ -0,0 +1,81 @@ +use crate::FieldExt; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct UniPoly { + pub coeffs: Vec, +} + +impl UniPoly { + fn eval_cubic(&self, x: F) -> F { + // ax^3 + bx^2 + cx + d + let x_sq = x.square(); + let x_cub = x_sq * x; + + let a = self.coeffs[0]; + let b = self.coeffs[1]; + let c = self.coeffs[2]; + let d = self.coeffs[3]; + + a 
* x_cub + b * x_sq + c * x + d
+    }
+
+    fn eval_quadratic(&self, x: F) -> F {
+        // ax^2 + bx + c
+        let x_sq = x.square();
+
+        let a = self.coeffs[0];
+        let b = self.coeffs[1];
+        let c = self.coeffs[2];
+
+        a * x_sq + b * x + c
+    }
+
+    pub fn eval(&self, x: F) -> F {
+        if self.coeffs.len() == 3 {
+            self.eval_quadratic(x)
+        } else {
+            self.eval_cubic(x)
+        }
+    }
+
+    pub fn interpolate(evals: &[F]) -> Self {
+        debug_assert!(
+            evals.len() == 4 || evals.len() == 3,
+            "Only cubic and quadratic polynomials are supported"
+        );
+
+        let two_inv = F::TWO_INV;
+
+        if evals.len() == 4 {
+            // ax^3 + bx^2 + cx + d
+            let six_inv = F::from(6u64).invert().unwrap();
+
+            let d = evals[0];
+            let a = six_inv
+                * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1]
+                    - evals[0]);
+            let b = two_inv
+                * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+                    + evals[2]
+                    + evals[2]
+                    + evals[2]
+                    + evals[2]
+                    - evals[3]);
+
+            let c = evals[1] - d - a - b;
+
+            Self {
+                coeffs: vec![a, b, c, d],
+            }
+        } else {
+            let c = evals[0];
+            let a = (evals[2] - evals[1] - evals[1] + evals[0]) * two_inv;
+            let b = evals[1] - a - c;
+
+            Self {
+                coeffs: vec![a, b, c],
+            }
+        }
+    }
+}
diff --git a/shockwave_plus/src/utils.rs b/shockwave_plus/src/utils.rs
new file mode 100644
index 0000000..b85c5b8
--- /dev/null
+++ b/shockwave_plus/src/utils.rs
@@ -0,0 +1,19 @@
+use crate::FieldExt;
+
+// Returns a vector of vectors of length m, where each vector is a boolean vector (little endian)
+pub fn boolean_hypercube<F: FieldExt>(m: usize) -> Vec<Vec<F>> {
+    let n = 2usize.pow(m as u32);
+
+    let mut boolean_hypercube = Vec::<Vec<F>>::with_capacity(n);
+
+    for i in 0..n {
+        let mut tmp = Vec::with_capacity(m);
+        for j in 0..m {
+            let i_b = F::from((i >> j & 1) as u64);
+            tmp.push(i_b);
+        }
+        boolean_hypercube.push(tmp);
+    }
+
+    boolean_hypercube
+}
diff --git a/tensor_pcs/Cargo.toml b/tensor_pcs/Cargo.toml
new file mode 100644
index 0000000..604e3d6
--- /dev/null
+++ b/tensor_pcs/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "tensor-pcs"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rand = "0.8.5"
+serde = { version = "1.0.152", features = ["derive"] }
+merlin = "3.0.0"
+ecfft = { git = "https://github.com/DanTehrani/ecfft" }
+tiny-keccak = { version = "2.0.2", features = ["keccak"] }
+halo2curves = "0.1.0"
+
+[dev-dependencies]
+criterion = { version = "0.4", features = ["html_reports"] }
+
+[[bench]]
+name = "prove"
+harness = false
\ No newline at end of file
diff --git a/tensor_pcs/benches/prove.rs b/tensor_pcs/benches/prove.rs
new file mode 100644
index 0000000..7fc75cf
--- /dev/null
+++ b/tensor_pcs/benches/prove.rs
@@ -0,0 +1,93 @@
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use tensor_pcs::{
+    rs_config, FieldExt, SparseMLPoly, TensorMultilinearPCS, TensorRSMultilinearPCSConfig,
+    Transcript,
+};
+
+fn poly<F: FieldExt>(num_vars: usize) -> SparseMLPoly<F> {
+    let num_entries: usize = 2usize.pow(num_vars as u32);
+
+    let evals = (0..num_entries)
+        .map(|i| (i, F::from(i as u64)))
+        .collect::<Vec<(usize, F)>>();
+
+    let ml_poly = SparseMLPoly::new(evals, num_vars);
+    ml_poly
+}
+
+fn config_base<F: FieldExt>(ml_poly: &SparseMLPoly<F>) -> TensorRSMultilinearPCSConfig<F> {
+    let num_vars = ml_poly.num_vars;
+    let num_evals = 2usize.pow(num_vars as u32);
+    let num_rows = 2usize.pow((num_vars / 2) as u32);
+
+    let expansion_factor = 2;
+
+    TensorRSMultilinearPCSConfig::<F> {
+        expansion_factor,
+        domain_powers: None,
fft_domain: None, + ecfft_config: None, + l: 10, + num_entries: num_evals, + num_rows, + } +} + +fn pcs_fft_bench(c: &mut Criterion) { + type F = halo2curves::pasta::Fp; + + let num_vars = 13; + let ml_poly = poly(num_vars); + let open_at = (0..ml_poly.num_vars) + .map(|i| F::from(i as u64)) + .collect::>(); + + let mut config = config_base(&ml_poly); + config.fft_domain = Some(rs_config::smooth::gen_config::(config.num_cols())); + + let mut group = c.benchmark_group("pcs fft"); + group.bench_function("prove", |b| { + b.iter(|| { + let pcs = TensorMultilinearPCS::::new(config.clone()); + + let mut transcript = Transcript::new(b"bench"); + let comm = pcs.commit(&black_box(ml_poly.clone())); + pcs.open(&comm, &ml_poly, &open_at, &mut transcript); + }) + }); +} + +fn pcs_ecfft_bench(c: &mut Criterion) { + type F = halo2curves::secp256k1::Fp; + + let num_vars = 13; + let ml_poly = poly(num_vars); + let open_at = (0..ml_poly.num_vars) + .map(|i| F::from(i as u64)) + .collect::>(); + + let mut config = config_base(&ml_poly); + config.ecfft_config = Some(rs_config::ecfft::gen_config::(config.num_cols())); + + let mut group = c.benchmark_group("pcs ecfft"); + group.bench_function("prove", |b| { + b.iter(|| { + let pcs = TensorMultilinearPCS::::new(config.clone()); + + let mut transcript = Transcript::new(b"bench"); + let comm = pcs.commit(&black_box(ml_poly.clone())); + pcs.open(&comm, &ml_poly, &open_at, &mut transcript); + }) + }); +} + +fn set_duration() -> Criterion { + Criterion::default().sample_size(10) +} + +criterion_group! { + name = benches; + config = set_duration(); + targets = pcs_fft_bench, pcs_ecfft_bench +} +criterion_main!(benches); diff --git a/tensor_pcs/src/fft.rs b/tensor_pcs/src/fft.rs new file mode 100644 index 0000000..d47665b --- /dev/null +++ b/tensor_pcs/src/fft.rs @@ -0,0 +1,118 @@ +use crate::FieldExt; +use halo2curves::ff::Field; +use std::vec; + +pub fn fft(coeffs: &[F], domain: &[F]) -> Vec +where + F: FieldExt, +{ + debug_assert_eq!(coeffs.len(), domain.len()); + if coeffs.len() == 1 { + return coeffs.to_vec(); + } + + // TODO: Just borrow the values + // Split into evens and odds + let L = coeffs + .iter() + .enumerate() + .filter(|(i, _)| i % 2 == 0) + .map(|(_, x)| *x) + .collect::>(); + + let R = coeffs + .iter() + .enumerate() + .filter(|(i, _)| i % 2 == 1) + .map(|(_, x)| *x) + .collect::>(); + + // Square the domain values + let domain_squared: Vec = (0..(domain.len() / 2)).map(|i| domain[i * 2]).collect(); + + let fft_e = fft(&L, &domain_squared); + let fft_o = fft(&R, &domain_squared); + + let mut evals_L = vec![]; + let mut evals_R = vec![]; + for i in 0..(coeffs.len() / 2) { + // We can use the previous evaluations to create a list of evaluations + // of the domain + evals_L.push(fft_e[i] + fft_o[i] * domain[i]); + evals_R.push(fft_e[i] - fft_o[i] * domain[i]); + } + + evals_L.extend(evals_R); + return evals_L; +} + +pub fn ifft(domain: &[F], evals: &[F]) -> Vec { + let mut coeffs = vec![]; + let len_mod_inv = F::from(domain.len() as u64).invert().unwrap(); + let vals = fft(&evals, &domain); + + coeffs.push(vals[0] * len_mod_inv); + for val in vals[1..].iter().rev() { + coeffs.push(*val * len_mod_inv); + } + + coeffs +} + +#[cfg(test)] +mod tests { + use halo2curves::ff::PrimeField; + use halo2curves::pasta::Fp; + + use super::*; + #[test] + fn test_fft_ifft() { + // f(x) = 1 + 2x + 3x^2 + 4x^3 + let mut coeffs = vec![ + Fp::from(1), + Fp::from(2), + Fp::from(3), + Fp::from(4), + Fp::from(5), + Fp::from(6), + Fp::from(7), + Fp::from(81), + ]; + + let 
mut domain = vec![]; + + let root_of_unity = Fp::ROOT_OF_UNITY; + + let subgroup_order = (coeffs.len() * 2).next_power_of_two(); + + coeffs.resize(subgroup_order, Fp::ZERO); + + // Generator for the subgroup with order _subgroup_order_ in the field + let generator = root_of_unity.pow(&[ + 2u32.pow(32 - ((subgroup_order as f64).log2() as u32)) as u64, + 0, + 0, + 0, + ]); + + for i in 0..(subgroup_order) { + domain.push(generator.pow(&[i as u64, 0, 0, 0])); + } + + let mut expected_evals = vec![]; + + for w in &domain { + let mut eval = Fp::ZERO; + for (i, coeff) in (&coeffs).iter().enumerate() { + eval += *coeff * w.pow(&[i as u64, 0, 0, 0]); + } + expected_evals.push(eval); + } + + let evals = fft(&coeffs, &domain); + debug_assert!(evals == expected_evals); + + let recovered_coeffs = ifft(&domain, &evals); + debug_assert!(recovered_coeffs == coeffs); + } +} diff --git a/tensor_pcs/src/lib.rs b/tensor_pcs/src/lib.rs new file mode 100644 index 0000000..83b3e6f --- /dev/null +++ b/tensor_pcs/src/lib.rs @@ -0,0 +1,19 @@ +mod fft; +mod polynomial; +pub mod rs_config; +mod tensor_code; +mod tensor_pcs; +mod transcript; +mod tree; +mod utils; +use halo2curves::ff::FromUniformBytes; + +pub trait FieldExt: FromUniformBytes<64, Repr = [u8; 32]> {} + +impl FieldExt for halo2curves::secp256k1::Fp {} +impl FieldExt for halo2curves::pasta::Fp {} + +pub use polynomial::eq_poly::EqPoly; +pub use polynomial::sparse_ml_poly::SparseMLPoly; +pub use tensor_pcs::{TensorMLOpening, TensorMultilinearPCS, TensorRSMultilinearPCSConfig}; +pub use transcript::{AppendToTranscript, Transcript}; diff --git a/tensor_pcs/src/polynomial/eq_poly.rs b/tensor_pcs/src/polynomial/eq_poly.rs new file mode 100644 index 0000000..765181a --- /dev/null +++ b/tensor_pcs/src/polynomial/eq_poly.rs @@ -0,0 +1,68 @@ +use crate::FieldExt; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct EqPoly { + t: Vec, +} + +impl EqPoly { + pub fn new(t: Vec) -> Self { + Self { t } + } + + pub fn eval(&self, x: &[F]) -> F { + let mut result = F::ONE; + let one = F::ONE; + + for i in 0..x.len() { + result *= self.t[i] * x[i] + (one - self.t[i]) * (one - x[i]); + } + result + } + + // Copied from microsoft/Spartan + pub fn evals(&self) -> Vec { + let ell = self.t.len(); // 4 + + let mut evals: Vec = vec![F::ONE; 2usize.pow(ell as u32)]; + let mut size = 1; + for j in 0..ell { + // in each iteration, we double the size of chis + size *= 2; // 2 4 8 16 + for i in (0..size).rev().step_by(2) { + // copy each element from the prior iteration twice + let scalar = evals[i / 2]; // i = 0, 2, 4, 7 + evals[i] = scalar * self.t[j]; // (1 * t0)(1 * t1) + evals[i - 1] = scalar - evals[i]; // 1 - (1 * t0)(1 * t1) + } + } + evals + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::polynomial::sparse_ml_poly::SparseMLPoly; + use halo2curves::ff::Field; + + type F = halo2curves::secp256k1::Fp; + + pub fn dot_prod(x: &[F], y: &[F]) -> F { + assert_eq!(x.len(), y.len()); + let mut result = F::ZERO; + for i in 0..x.len() { + result += x[i] * y[i]; + } + result + } + + #[test] + fn test_eq_poly() { + let m = 4; + let t = (0..m).map(|i| F::from((i + 33) as u64)).collect::>(); + let eq_poly = EqPoly::new(t.clone()); + eq_poly.evals(); + } +} diff --git a/tensor_pcs/src/polynomial/mod.rs b/tensor_pcs/src/polynomial/mod.rs new file mode 100644 index 0000000..73643eb --- /dev/null +++ b/tensor_pcs/src/polynomial/mod.rs @@ -0,0 +1,2 @@ +pub mod eq_poly; +pub mod sparse_ml_poly; diff --git 
a/tensor_pcs/src/polynomial/sparse_ml_poly.rs b/tensor_pcs/src/polynomial/sparse_ml_poly.rs
new file mode 100644
index 0000000..6b618c3
--- /dev/null
+++ b/tensor_pcs/src/polynomial/sparse_ml_poly.rs
@@ -0,0 +1,44 @@
+use crate::{EqPoly, FieldExt};
+
+#[derive(Clone, Debug)]
+pub struct SparseMLPoly<F> {
+    pub evals: Vec<(usize, F)>,
+    pub num_vars: usize,
+}
+
+impl<F: FieldExt> SparseMLPoly<F> {
+    pub fn new(evals: Vec<(usize, F)>, num_vars: usize) -> Self {
+        Self { evals, num_vars }
+    }
+
+    pub fn from_dense(dense_evals: Vec<F>) -> Self {
+        // Enumerate before filtering, so that each retained evaluation keeps
+        // its index in the dense table.
+        let sparse_evals = dense_evals
+            .iter()
+            .enumerate()
+            .filter(|(_, eval)| **eval != F::ZERO)
+            .map(|(i, eval)| (i, *eval))
+            .collect::<Vec<(usize, F)>>();
+        let num_vars = (dense_evals.len() as f64).log2() as usize;
+
+        Self {
+            evals: sparse_evals,
+            num_vars,
+        }
+    }
+
+    pub fn eval(&self, t: &[F]) -> F {
+        // Evaluate the multilinear extension of the polynomial `a`,
+        // over the boolean hypercube
+
+        let eq_poly = EqPoly::new(t.to_vec());
+        let eq_evals = eq_poly.evals();
+
+        let mut result = F::ZERO;
+
+        for eval in &self.evals {
+            result += eq_evals[eval.0] * eval.1;
+        }
+
+        result
+    }
+}
diff --git a/tensor_pcs/src/rs_config/ecfft.rs b/tensor_pcs/src/rs_config/ecfft.rs
new file mode 100644
index 0000000..1031361
--- /dev/null
+++ b/tensor_pcs/src/rs_config/ecfft.rs
@@ -0,0 +1,27 @@
+use crate::FieldExt;
+use ecfft::{prepare_domain, prepare_matrices, GoodCurve, Matrix2x2};
+
+#[derive(Clone, Debug)]
+pub struct ECFFTConfig<F: FieldExt> {
+    pub domain: Vec<Vec<F>>,
+    pub matrices: Vec<Vec<Matrix2x2<F>>>,
+    pub inverse_matrices: Vec<Vec<Matrix2x2<F>>>,
+}
+
+pub fn gen_config<F: FieldExt>(num_cols: usize) -> ECFFTConfig<F> {
+    assert!(num_cols.is_power_of_two());
+    let expansion_factor = 2;
+    let codeword_len = num_cols * expansion_factor;
+
+    let k = (codeword_len as f64).log2() as usize;
+
+    let good_curve = GoodCurve::find_k(k);
+    let domain = prepare_domain(good_curve);
+    let (matrices, inverse_matrices) = prepare_matrices(&domain);
+
+    ECFFTConfig {
+        domain,
+        matrices,
+        inverse_matrices,
+    }
+}
diff --git a/tensor_pcs/src/rs_config/mod.rs b/tensor_pcs/src/rs_config/mod.rs
new file mode 100644
index 0000000..b905438
--- /dev/null
+++ b/tensor_pcs/src/rs_config/mod.rs
@@ -0,0 +1,3 @@
+pub mod ecfft;
+pub mod naive;
+pub mod smooth;
diff --git a/tensor_pcs/src/rs_config/naive.rs b/tensor_pcs/src/rs_config/naive.rs
new file mode 100644
index 0000000..55960f1
--- /dev/null
+++ b/tensor_pcs/src/rs_config/naive.rs
@@ -0,0 +1,21 @@
+use crate::FieldExt;
+
+pub fn gen_config<F: FieldExt>(num_cols: usize) -> Vec<Vec<F>> {
+    assert!(num_cols.is_power_of_two());
+    let expansion_factor = 2;
+    let codeword_len = num_cols * expansion_factor;
+    let domain = (0..codeword_len)
+        .map(|i| F::from((i + 3) as u64))
+        .collect::<Vec<F>>();
+
+    let mut domain_powers = Vec::with_capacity(codeword_len);
+    for eval_at in domain {
+        let mut powers_i = vec![F::ONE];
+        for j in 0..(num_cols - 1) {
+            powers_i.push(powers_i[j] * eval_at);
+        }
+        domain_powers.push(powers_i);
+    }
+
+    domain_powers
+}
diff --git a/tensor_pcs/src/rs_config/smooth.rs b/tensor_pcs/src/rs_config/smooth.rs
new file mode 100644
index 0000000..16b3880
--- /dev/null
+++ b/tensor_pcs/src/rs_config/smooth.rs
@@ -0,0 +1,23 @@
+use crate::FieldExt;
+
+pub fn gen_config<F: FieldExt>(num_cols: usize) -> Vec<F> {
+    assert!(num_cols.is_power_of_two());
+    let expansion_factor = 2;
+    let codeword_len = num_cols * expansion_factor;
+
+    let domain_generator = F::ROOT_OF_UNITY.pow(&[
+        2u32.pow(32 - ((codeword_len as f64).log2() as u32)) as u64,
+        0,
+        0,
+        0,
+    ]);
+
+    // Compute the FFT domain
+    let mut fft_domain = Vec::with_capacity(codeword_len);
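+    // The domain is the cyclic subgroup generated by domain_generator,
+    // enumerated as successive powers: [1, g, g^2, ..., g^(codeword_len - 1)].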
fft_domain.push(F::ONE); + for i in 0..(codeword_len - 1) { + fft_domain.push(fft_domain[i] * domain_generator); + } + + fft_domain +} diff --git a/tensor_pcs/src/tensor_code.rs b/tensor_pcs/src/tensor_code.rs new file mode 100644 index 0000000..2938b1d --- /dev/null +++ b/tensor_pcs/src/tensor_code.rs @@ -0,0 +1,42 @@ +use crate::tree::CommittedMerkleTree; +use crate::FieldExt; + +#[derive(Clone)] +pub struct TensorCode(pub Vec>) +where + F: FieldExt; + +impl TensorCode { + pub fn commit(&self, num_cols: usize, num_rows: usize) -> CommittedTensorCode { + // Flatten the tensor codeword in column major order + let mut tensor_codeword = vec![]; + for j in 0..(num_cols * 2) { + for i in 0..num_rows { + tensor_codeword.push(self.0[i][j]) + } + } + // Merkle commit the codewords + let committed_tree = CommittedMerkleTree::from_leaves(tensor_codeword, num_cols * 2); + + CommittedTensorCode { + committed_tree, + tensor_codeword: Self(self.0.clone()), + } + } +} + +#[derive(Clone)] +pub struct CommittedTensorCode { + pub committed_tree: CommittedMerkleTree, + pub tensor_codeword: TensorCode, +} + +impl CommittedTensorCode { + pub fn query_column(&self, column: usize, num_cols: usize) -> Vec { + let num_rows = self.tensor_codeword.0.len(); + + let leaves = + self.committed_tree.leaves[column * num_rows..((column + 1) * num_rows)].to_vec(); + leaves + } +} diff --git a/tensor_pcs/src/tensor_pcs.rs b/tensor_pcs/src/tensor_pcs.rs new file mode 100644 index 0000000..4053f58 --- /dev/null +++ b/tensor_pcs/src/tensor_pcs.rs @@ -0,0 +1,446 @@ +use crate::rs_config::ecfft::ECFFTConfig; +use crate::tree::BaseOpening; +use crate::FieldExt; +use ecfft::extend; +use serde::{Deserialize, Serialize}; + +use crate::fft::fft; +use crate::polynomial::eq_poly::EqPoly; +use crate::polynomial::sparse_ml_poly::SparseMLPoly; +use crate::tensor_code::TensorCode; +use crate::transcript::Transcript; +use crate::utils::{dot_prod, hash_all, rlc_rows, sample_indices}; + +use super::tensor_code::CommittedTensorCode; + +#[derive(Clone)] +pub struct TensorRSMultilinearPCSConfig { + pub expansion_factor: usize, + pub domain_powers: Option>>, + pub fft_domain: Option>, + pub ecfft_config: Option>, + pub l: usize, + pub num_entries: usize, + pub num_rows: usize, +} + +impl TensorRSMultilinearPCSConfig { + pub fn num_cols(&self) -> usize { + self.num_entries / self.num_rows() + } + + pub fn num_rows(&self) -> usize { + self.num_rows + } +} + +#[derive(Clone)] +pub struct TensorMultilinearPCS { + config: TensorRSMultilinearPCSConfig, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct TensorMLOpening { + pub x: Vec, + pub y: F, + pub base_opening: BaseOpening, + pub test_query_leaves: Vec>, + pub eval_query_leaves: Vec>, + u_hat_comm: [u8; 32], + pub test_u_prime: Vec, + pub test_r_prime: Vec, + pub eval_r_prime: Vec, + pub eval_u_prime: Vec, +} + +impl TensorMultilinearPCS { + pub fn new(config: TensorRSMultilinearPCSConfig) -> Self { + Self { config } + } + + pub fn commit(&self, poly: &SparseMLPoly) -> CommittedTensorCode { + // Merkle commit to the evaluations of the polynomial + let tensor_code = self.encode_zk(poly); + let tree = tensor_code.commit(self.config.num_cols(), self.config.num_rows()); + tree + } + + pub fn open( + &self, + u_hat_comm: &CommittedTensorCode, + poly: &SparseMLPoly, + point: &[F], + transcript: &mut Transcript, + ) -> TensorMLOpening { + let num_cols = self.config.num_cols(); + let num_rows = self.config.num_rows(); + debug_assert_eq!(poly.num_vars, point.len()); + + 
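+        // Bind the codeword commitment into the transcript before deriving
+        // any challenges (Fiat-Shamir).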
transcript.append_bytes(&u_hat_comm.committed_tree.root()); + + // ######################################## + // Testing phase + // Prove the consistency between the random linear combination of the evaluation tensor (u_prime) + // and the tensor codeword (u_hat) + // ######################################## + + // Derive the challenge vector; + let r_u = transcript.challenge_vec(num_rows); + + let u = (0..num_rows) + .map(|i| { + poly.evals[(i * num_cols)..((i + 1) * num_cols)] + .iter() + .map(|entry| entry.1) + .collect::>() + }) + .collect::>>(); + + // Random linear combination of the rows of the polynomial in a tensor structure + let test_u_prime = rlc_rows(u.clone(), &r_u); + + // Random linear combination of the blinder + let blinder = u_hat_comm + .tensor_codeword + .0 + .iter() + .map(|row| row[(row.len() / 2)..].to_vec()) + .collect::>>(); + + debug_assert_eq!(blinder[0].len(), u_hat_comm.tensor_codeword.0[0].len() / 2); + + let test_r_prime = rlc_rows(blinder.clone(), &r_u); + + let num_indices = self.config.l; + let indices = sample_indices(num_indices, num_cols * 2, transcript); + + let test_queries = self.test_phase(&indices, &u_hat_comm); + + // ######################################## + // Evaluation phase + // Prove the consistency + // ######################################## + + // Get the evaluation point + let mut point_rev = point.to_vec(); + point_rev.reverse(); + + let log2_num_rows = (num_rows as f64).log2() as usize; + let q1 = EqPoly::new(point_rev[0..log2_num_rows].to_vec()).evals(); + + let eval_r_prime = rlc_rows(blinder, &q1); + + let eval_u_prime = rlc_rows(u.clone(), &q1); + + let eval_queries = self.test_phase(&indices, &u_hat_comm); + + TensorMLOpening { + x: point.to_vec(), + y: poly.eval(&point_rev), + eval_query_leaves: eval_queries, + test_query_leaves: test_queries, + u_hat_comm: u_hat_comm.committed_tree.root(), + test_u_prime, + test_r_prime, + eval_r_prime, + eval_u_prime, + base_opening: BaseOpening { + hashes: u_hat_comm.committed_tree.column_roots.clone(), + }, + } + } +} + +impl TensorMultilinearPCS { + pub fn verify( + &self, + opening: &TensorMLOpening, + commitment: &[u8; 32], + transcript: &mut Transcript, + ) { + let num_rows = self.config.num_rows(); + let num_cols = self.config.num_cols(); + + let u_hat_comm = opening.u_hat_comm; + transcript.append_bytes(&u_hat_comm); + + assert_eq!(&u_hat_comm, commitment); + + // Verify the base opening + + let base_opening = &opening.base_opening; + base_opening.verify(u_hat_comm); + + // ######################################## + // Verify test phase + // ######################################## + + let r_u = transcript.challenge_vec(num_rows); + + let test_u_prime_rs_codeword = self + .rs_encode(&opening.test_u_prime) + .iter() + .zip(opening.test_r_prime.iter()) + .map(|(c, r)| *c + *r) + .collect::>(); + + let num_indices = self.config.l; + let indices = sample_indices(num_indices, num_cols * 2, transcript); + + debug_assert_eq!(indices.len(), opening.test_query_leaves.len()); + for (expected_index, leaves) in indices.iter().zip(opening.test_query_leaves.iter()) { + // Verify that the hashes of the leaves equals the corresponding column root + let leaf_bytes = leaves + .iter() + .map(|x| x.to_repr()) + .collect::>(); + let column_root = hash_all(&leaf_bytes); + let expected_column_root = base_opening.hashes[*expected_index]; + assert_eq!(column_root, expected_column_root); + + let mut sum = F::ZERO; + for (leaf, r_i) in leaves.iter().zip(r_u.iter()) { + sum += *r_i * *leaf; + } + 
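+            // By linearity of the Reed-Solomon code, the r_u-combination of a
+            // queried column must equal the encoding of the claimed combined
+            // row (plus the combined blinder) at the queried index.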
assert_eq!(sum, test_u_prime_rs_codeword[*expected_index]); + } + + // ######################################## + // Verify evaluation phase + // ######################################## + + let mut x_rev = opening.x.clone(); + x_rev.reverse(); + + let log2_num_rows = (num_rows as f64).log2() as usize; + let q1 = EqPoly::new(x_rev[0..log2_num_rows].to_vec()).evals(); + let q2 = EqPoly::new(x_rev[log2_num_rows..].to_vec()).evals(); + + let eval_u_prime_rs_codeword = self + .rs_encode(&opening.eval_u_prime) + .iter() + .zip(opening.eval_r_prime.iter()) + .map(|(c, r)| *c + *r) + .collect::>(); + + debug_assert_eq!(q1.len(), opening.eval_query_leaves[0].len()); + debug_assert_eq!(indices.len(), opening.test_query_leaves.len()); + for (expected_index, leaves) in indices.iter().zip(opening.eval_query_leaves.iter()) { + // TODO: Don't need to check the leaves again? + // Verify that the hashes of the leaves equals the corresponding column root + let leaf_bytes = leaves + .iter() + .map(|x| x.to_repr()) + .collect::>(); + let column_root = hash_all(&leaf_bytes); + let expected_column_root = base_opening.hashes[*expected_index]; + assert_eq!(column_root, expected_column_root); + + let mut sum = F::ZERO; + for (leaf, q1_i) in leaves.iter().zip(q1.iter()) { + sum += *q1_i * *leaf; + } + assert_eq!(sum, eval_u_prime_rs_codeword[*expected_index]); + } + + let expected_eval = dot_prod(&opening.eval_u_prime, &q2); + assert_eq!(expected_eval, opening.y); + } + + fn split_encode(&self, message: &[F]) -> Vec { + let codeword = self.rs_encode(message); + + let mut rng = rand::thread_rng(); + let blinder = (0..codeword.len()) + .map(|_| F::random(&mut rng)) + .collect::>(); + + let mut randomized_codeword = codeword + .iter() + .zip(blinder.clone().iter()) + .map(|(c, b)| *b + *c) + .collect::>(); + + randomized_codeword.extend_from_slice(&blinder); + debug_assert_eq!(randomized_codeword.len(), codeword.len() * 2); + randomized_codeword + } + + fn rs_encode(&self, message: &[F]) -> Vec { + let codeword = if self.config.fft_domain.is_some() { + let fft_domain = self.config.fft_domain.as_ref().unwrap(); + let mut padded_coeffs = message.clone().to_vec(); + padded_coeffs.resize(fft_domain.len(), F::ZERO); + let codeword = fft(&padded_coeffs, &fft_domain); + + codeword + } else if self.config.ecfft_config.is_some() { + let ecfft_config = self.config.ecfft_config.as_ref().unwrap(); + assert_eq!( + message.len() * self.config.expansion_factor, + ecfft_config.domain[0].len() + ); + let extended_evals = extend( + message, + &ecfft_config.domain, + &ecfft_config.matrices, + &ecfft_config.inverse_matrices, + 0, + ); + + let codeword = [message.to_vec(), extended_evals].concat(); + codeword + } else { + let domain_powers = self.config.domain_powers.as_ref().unwrap(); + assert_eq!(message.len(), domain_powers[0].len()); + assert_eq!( + message.len() * self.config.expansion_factor, + domain_powers.len() + ); + + let codeword = domain_powers + .iter() + .map(|powers| { + message + .iter() + .zip(powers.iter()) + .fold(F::ZERO, |acc, (m, p)| acc + *m * *p) + }) + .collect::>(); + + codeword + }; + + codeword + } + + fn test_phase(&self, indices: &[usize], u_hat_comm: &CommittedTensorCode) -> Vec> { + let num_cols = self.config.num_cols() * 2; + + // Query the columns of u_hat + let num_indices = self.config.l; + + let u_hat_openings = indices + .iter() + .map(|index| u_hat_comm.query_column(*index, num_cols)) + .collect::>>(); + + debug_assert_eq!(u_hat_openings.len(), num_indices); + + u_hat_openings + } + + fn 
+    fn encode_zk(&self, poly: &SparseMLPoly<F>) -> TensorCode<F> {
+        let num_rows = self.config.num_rows();
+        let num_cols = self.config.num_cols();
+
+        let codewords = (0..num_rows)
+            .map(|i| {
+                poly.evals[i * num_cols..(i + 1) * num_cols]
+                    .iter()
+                    .map(|entry| entry.1)
+                    .collect::<Vec<F>>()
+            })
+            .map(|row| self.split_encode(&row))
+            .collect::<Vec<Vec<F>>>();
+
+        TensorCode(codewords)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::rs_config::{ecfft, naive, smooth};
+
+    const TEST_NUM_VARS: usize = 10;
+    const TEST_L: usize = 10;
+
+    fn test_poly<F: FieldExt>() -> SparseMLPoly<F> {
+        let num_entries: usize = 2usize.pow(TEST_NUM_VARS as u32);
+
+        let evals = (0..num_entries)
+            .map(|i| (i, F::from(i as u64)))
+            .collect::<Vec<(usize, F)>>();
+
+        SparseMLPoly::new(evals, TEST_NUM_VARS)
+    }
+
+    fn prove_and_verify<F: FieldExt>(ml_poly: SparseMLPoly<F>, pcs: TensorMultilinearPCS<F>) {
+        let comm = pcs.commit(&ml_poly);
+
+        let open_at = (0..ml_poly.num_vars)
+            .map(|i| F::from(i as u64))
+            .collect::<Vec<F>>();
+
+        let mut prover_transcript = Transcript::<F>::new(b"test");
+        let opening = pcs.open(&comm, &ml_poly, &open_at, &mut prover_transcript);
+
+        let mut verifier_transcript = Transcript::<F>::new(b"test");
+        pcs.verify(
+            &opening,
+            &comm.committed_tree.root(),
+            &mut verifier_transcript,
+        );
+    }
+
+    fn config_base<F: FieldExt>(ml_poly: &SparseMLPoly<F>) -> TensorRSMultilinearPCSConfig<F> {
+        let num_vars = ml_poly.num_vars;
+        let num_evals = 2usize.pow(num_vars as u32);
+        let num_rows = 2usize.pow((num_vars / 2) as u32);
+
+        let expansion_factor = 2;
+
+        TensorRSMultilinearPCSConfig::<F> {
+            expansion_factor,
+            domain_powers: None,
+            fft_domain: None,
+            ecfft_config: None,
+            l: TEST_L,
+            num_entries: num_evals,
+            num_rows,
+        }
+    }
+
+    #[test]
+    fn test_tensor_pcs_fft() {
+        type F = halo2curves::pasta::Fp;
+        let ml_poly = test_poly();
+
+        // FFT config
+        let mut config = config_base(&ml_poly);
+        config.fft_domain = Some(smooth::gen_config(config.num_cols()));
+
+        // Test the FFT-based PCS
+        let tensor_pcs_fft = TensorMultilinearPCS::<F>::new(config);
+        prove_and_verify(ml_poly, tensor_pcs_fft);
+    }
+
+    #[test]
+    fn test_tensor_pcs_ecfft() {
+        type F = halo2curves::secp256k1::Fp;
+        let ml_poly = test_poly();
+
+        // ECFFT config
+        let mut config = config_base(&ml_poly);
+        config.ecfft_config = Some(ecfft::gen_config(config.num_cols()));
+
+        // Test the ECFFT-based PCS
+        let tensor_pcs_ecf = TensorMultilinearPCS::<F>::new(config);
+        prove_and_verify(ml_poly, tensor_pcs_ecf);
+    }
+
+    #[test]
+    fn test_tensor_pcs_naive() {
+        type F = halo2curves::secp256k1::Fp;
+        let ml_poly = test_poly();
+
+        // Naive config
+        let mut config = config_base(&ml_poly);
+        config.domain_powers = Some(naive::gen_config(config.num_cols()));
+
+        // Test the naive PCS
+        let tensor_pcs_naive = TensorMultilinearPCS::<F>::new(config);
+        prove_and_verify(ml_poly, tensor_pcs_naive);
+    }
+}
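For concreteness, the test parameters above give a 32 × 32 evaluation matrix (2^10 = 1024 evaluations, num_rows = 2^5 = 32). With expansion_factor = 2, each 32-entry row is Reed-Solomon encoded into 64 symbols, which split_encode doubles to 128 by appending the blinder; an opening then spot-checks TEST_L = 10 distinct columns sampled from the 64 encoded positions.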
diff --git a/tensor_pcs/src/transcript.rs b/tensor_pcs/src/transcript.rs
new file mode 100644
index 0000000..db570a4
--- /dev/null
+++ b/tensor_pcs/src/transcript.rs
@@ -0,0 +1,58 @@
+use crate::FieldExt;
+use halo2curves::ff::PrimeField;
+use merlin::Transcript as MerlinTranscript;
+use std::marker::PhantomData;
+
+#[derive(Clone)]
+pub struct Transcript<F: FieldExt> {
+    transcript_inner: MerlinTranscript,
+    _marker: PhantomData<F>,
+}
+
+impl<F: FieldExt> Transcript<F> {
+    pub fn new(label: &'static [u8]) -> Self {
+        Self {
+            transcript_inner: MerlinTranscript::new(label),
+            _marker: PhantomData,
+        }
+    }
+
+    pub fn append_fe(&mut self, fe: &F) {
+        self.transcript_inner.append_message(b"", &fe.to_repr());
+    }
+
+    pub fn append_bytes(&mut self, bytes: &[u8]) {
+        self.transcript_inner.append_message(b"", bytes);
+    }
+
+    pub fn challenge_vec(&mut self, n: usize) -> Vec<F> {
+        (0..n)
+            .map(|_| {
+                let mut bytes = [0u8; 64];
+                self.transcript_inner.challenge_bytes(b"", &mut bytes);
+                F::from_uniform_bytes(&bytes)
+            })
+            .collect()
+    }
+
+    pub fn challenge_fe(&mut self) -> F {
+        // TODO: This is insecure; from_repr fails for bytes outside the field,
+        // and the output is not uniform. Use from_uniform_bytes as above.
+        let mut bytes = [0u8; 32];
+        self.transcript_inner.challenge_bytes(b"", &mut bytes);
+        F::from_repr(bytes).unwrap()
+    }
+
+    pub fn challenge_bytes(&mut self, bytes: &mut [u8]) {
+        self.transcript_inner.challenge_bytes(b"", bytes);
+    }
+}
+
+pub trait AppendToTranscript<F: FieldExt> {
+    fn append_to_transcript(&self, transcript: &mut Transcript<F>);
+}
+
+impl<F: FieldExt> AppendToTranscript<F> for [u8; 32] {
+    fn append_to_transcript(&self, transcript: &mut Transcript<F>) {
+        transcript.append_bytes(self);
+    }
+}
diff --git a/tensor_pcs/src/tree.rs b/tensor_pcs/src/tree.rs
new file mode 100644
index 0000000..3d51e51
--- /dev/null
+++ b/tensor_pcs/src/tree.rs
@@ -0,0 +1,64 @@
+use crate::{utils::hash_all, FieldExt};
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone)]
+pub struct CommittedMerkleTree<F: FieldExt> {
+    pub column_roots: Vec<[u8; 32]>,
+    pub leaves: Vec<F>,
+    pub num_cols: usize,
+    pub root: [u8; 32],
+}
+
+impl<F: FieldExt> CommittedMerkleTree<F> {
+    pub fn from_leaves(leaves: Vec<F>, num_cols: usize) -> Self {
+        let n = leaves.len();
+        debug_assert!(n.is_power_of_two());
+        let num_rows = n / num_cols;
+        assert!(num_rows & 1 == 0); // Number of rows must be even
+
+        let leaf_bytes = leaves
+            .iter()
+            .map(|x| x.to_repr())
+            .collect::<Vec<[u8; 32]>>();
+
+        // Hash each column of leaves into a column root, then hash the
+        // column roots into the tree root
+        let mut column_roots = Vec::with_capacity(num_cols);
+        for col in 0..num_cols {
+            let column_leaves = leaf_bytes[col * num_rows..(col + 1) * num_rows].to_vec();
+            let column_root = hash_all(&column_leaves);
+            column_roots.push(column_root);
+        }
+
+        let root = hash_all(&column_roots);
+
+        Self {
+            column_roots,
+            leaves,
+            root,
+            num_cols,
+        }
+    }
+
+    pub fn root(&self) -> [u8; 32] {
+        self.root
+    }
+
+    pub fn leaves(&self) -> Vec<F> {
+        self.leaves.clone()
+    }
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct BaseOpening {
+    pub hashes: Vec<[u8; 32]>,
+}
+
+impl BaseOpening {
+    pub fn verify(&self, root: [u8; 32]) -> bool {
+        let r = hash_all(&self.hashes);
+        root == r
+    }
+}
diff --git a/tensor_pcs/src/utils.rs b/tensor_pcs/src/utils.rs
new file mode 100644
index 0000000..871c18d
--- /dev/null
+++ b/tensor_pcs/src/utils.rs
@@ -0,0 +1,89 @@
+use tiny_keccak::{Hasher, Keccak};
+
+use crate::FieldExt;
+
+use crate::transcript::Transcript;
+
+pub fn rlc_rows<F: FieldExt>(x: Vec<Vec<F>>, r: &[F]) -> Vec<F> {
+    debug_assert_eq!(x.len(), r.len());
+    let num_cols = x[0].len();
+    let mut result = vec![F::ZERO; num_cols];
+    for (row, r_i) in x.iter().zip(r.iter()) {
+        for j in 0..num_cols {
+            result[j] += row[j] * r_i;
+        }
+    }
+    result
+}
+
+pub fn dot_prod<F: FieldExt>(x: &[F], y: &[F]) -> F {
+    assert_eq!(x.len(), y.len());
+    let mut result = F::ZERO;
+    for i in 0..x.len() {
+        result += x[i] * y[i];
+    }
+    result
+}
+
+pub fn hash_two(values: &[[u8; 32]; 2]) -> [u8; 32] {
+    let mut hasher = Keccak::v256();
+    hasher.update(&values[0]);
+    hasher.update(&values[1]);
+    let mut hash = [0u8; 32];
+    hasher.finalize(&mut hash);
+    hash
+}
+
+pub fn hash_all(values: &[[u8; 32]]) -> [u8; 32] {
+    let mut hasher = Keccak::v256();
+    for value in values {
+        hasher.update(value);
+    }
+    let mut hash = [0u8; 32];
+    hasher.finalize(&mut hash);
+    hash
+}
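+
+// Folds the challenge bytes into a u64 accumulator (big-endian; only the last
+// 8 of the 64 bytes survive the shifts) and reduces it modulo `size`. The
+// reduction is unbiased when `size` is a power of two, as it is for the
+// power-of-two column counts used here.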
+fn sample_index(random_bytes: [u8; 64], size: usize) -> usize {
+    let mut acc: u64 = 0;
+    for b in random_bytes {
+        acc = acc << 8 ^ (b as u64);
+    }
+
+    (acc % (size as u64)) as usize
+}
+
+pub fn sample_indices<F: FieldExt>(
+    num_indices: usize,
+    max_index: usize,
+    transcript: &mut Transcript<F>,
+) -> Vec<usize> {
+    assert!(
+        num_indices <= max_index,
+        "max_index {:?} num_indices {:?}",
+        max_index,
+        num_indices
+    );
+
+    let mut indices = Vec::with_capacity(num_indices);
+    let mut counter: u32 = 0;
+
+    // TODO: Don't sample at n and n + N
+    while indices.len() < num_indices {
+        let mut random_bytes = [0u8; 64];
+
+        transcript.append_bytes(&counter.to_le_bytes());
+        transcript.challenge_bytes(&mut random_bytes);
+
+        let index = sample_index(random_bytes, max_index);
+        if !indices.contains(&index)
+        // || !indices.contains(&(index + (max_index / 2)))
+        // || !indices.contains(&(index - (max_index / 2)))
+        {
+            indices.push(index);
+        }
+        counter += 1;
+    }
+
+    indices
+}
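
Not part of the diff: a minimal sketch of driving the Fiat-Shamir query sampling above. It assumes `sample_indices` is re-exported from the `tensor_pcs` crate root (the benchmark's `use tensor_pcs::Transcript;` confirms this only for `Transcript`) and that the crate implements `FieldExt` for `halo2curves::secp256k1::Fp`, as its tests suggest.

```rust
use tensor_pcs::Transcript;
// Assumed re-export; `sample_indices` is defined in tensor_pcs/src/utils.rs.
use tensor_pcs::sample_indices;

type F = halo2curves::secp256k1::Fp;

fn main() {
    // Prover and verifier must derive the query set from identical
    // transcript states, so both append the commitment root first.
    let mut transcript = Transcript::<F>::new(b"example");
    let root = [0u8; 32]; // placeholder for a real Merkle root
    transcript.append_bytes(&root);

    // Draw 10 distinct column indices out of 64 codeword columns.
    let indices = sample_indices(10, 64, &mut transcript);
    assert_eq!(indices.len(), 10);
}
```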