From 719f595758a8afdb6669355d14e03e7d41850114 Mon Sep 17 00:00:00 2001 From: zhenfei Date: Thu, 13 Oct 2022 23:21:30 -0400 Subject: [PATCH] Batch all (#89) - use sumcheck to batch open PCS - split Prod and witness into two batches - benchmark code --- Cargo.toml | 2 +- arithmetic/src/lib.rs | 8 +- arithmetic/src/multilinear_polynomial.rs | 93 +++- arithmetic/src/virtual_polynomial.rs | 58 ++- bench_results/plot_component | 51 +++ bench_results/plot_high_degree | 31 ++ bench_results/plot_iop | 59 +++ bench_results/plot_msm | 50 ++ bench_results/plot_multi_thread | 32 ++ bench_results/plot_spartan | 55 +++ hyperplonk/Cargo.toml | 19 +- hyperplonk/benches/bench.rs | 67 +-- hyperplonk/src/custom_gate.rs | 37 +- hyperplonk/src/errors.rs | 3 +- hyperplonk/src/lib.rs | 3 +- hyperplonk/src/mock.rs | 49 +- hyperplonk/src/snark.rs | 430 +++++++---------- hyperplonk/src/structs.rs | 56 +-- hyperplonk/src/utils.rs | 99 ++-- pcs/Cargo.toml | 51 --- pcs/src/multilinear_kzg/batching/mod.rs | 11 - .../multilinear_kzg/batching/multi_poly.rs | 426 ----------------- .../multilinear_kzg/batching/single_poly.rs | 368 --------------- pcs/src/multilinear_kzg/util.rs | 432 ------------------ poly-iop/src/prelude.rs | 4 - scripts/run_benchmarks.m4 | 37 -- scripts/run_benchmarks.sh | 112 +---- {poly-iop => subroutines}/Cargo.toml | 27 +- .../benches/iop_bench.rs | 26 +- .../benches/pcs_bench.rs | 19 +- subroutines/src/lib.rs | 5 + {pcs/src => subroutines/src/pcs}/errors.rs | 0 pcs/src/lib.rs => subroutines/src/pcs/mod.rs | 66 +-- .../src/pcs/multilinear_kzg/batching.rs | 329 +++++++++++++ .../src/pcs}/multilinear_kzg/mod.rs | 319 ++----------- .../src/pcs}/multilinear_kzg/srs.rs | 26 +- subroutines/src/pcs/multilinear_kzg/util.rs | 51 +++ {pcs/src => subroutines/src/pcs}/prelude.rs | 6 +- {pcs => subroutines/src/pcs}/readme.md | 0 {pcs/src => subroutines/src/pcs}/structs.rs | 0 .../src/pcs}/univariate_kzg/mod.rs | 189 +------- .../src/pcs}/univariate_kzg/srs.rs | 2 +- .../src/poly_iop}/errors.rs | 2 +- .../lib.rs => subroutines/src/poly_iop/mod.rs | 0 .../src/poly_iop}/perm_check/mod.rs | 26 +- .../src/poly_iop}/perm_check/util.rs | 2 +- subroutines/src/poly_iop/prelude.rs | 4 + .../src/poly_iop}/prod_check/mod.rs | 20 +- .../src/poly_iop}/prod_check/util.rs | 2 +- .../src/poly_iop}/readme.md | 0 .../src/poly_iop}/structs.rs | 0 .../src/poly_iop}/sum_check/mod.rs | 2 +- .../src/poly_iop}/sum_check/prover.rs | 85 ++-- .../src/poly_iop}/sum_check/verifier.rs | 4 +- .../src => subroutines/src/poly_iop}/utils.rs | 0 .../src/poly_iop}/zero_check/mod.rs | 4 +- 56 files changed, 1349 insertions(+), 2510 deletions(-) create mode 100644 bench_results/plot_component create mode 100644 bench_results/plot_high_degree create mode 100644 bench_results/plot_iop create mode 100644 bench_results/plot_msm create mode 100644 bench_results/plot_multi_thread create mode 100644 bench_results/plot_spartan delete mode 100644 pcs/Cargo.toml delete mode 100644 pcs/src/multilinear_kzg/batching/mod.rs delete mode 100644 pcs/src/multilinear_kzg/batching/multi_poly.rs delete mode 100644 pcs/src/multilinear_kzg/batching/single_poly.rs delete mode 100644 pcs/src/multilinear_kzg/util.rs delete mode 100644 poly-iop/src/prelude.rs delete mode 100755 scripts/run_benchmarks.m4 rename {poly-iop => subroutines}/Cargo.toml (76%) rename poly-iop/benches/bench.rs => subroutines/benches/iop_bench.rs (91%) rename pcs/benches/bench.rs => subroutines/benches/pcs_bench.rs (89%) create mode 100644 subroutines/src/lib.rs rename {pcs/src => 
subroutines/src/pcs}/errors.rs (100%) rename pcs/src/lib.rs => subroutines/src/pcs/mod.rs (76%) create mode 100644 subroutines/src/pcs/multilinear_kzg/batching.rs rename {pcs/src => subroutines/src/pcs}/multilinear_kzg/mod.rs (53%) rename {pcs/src => subroutines/src/pcs}/multilinear_kzg/srs.rs (90%) create mode 100644 subroutines/src/pcs/multilinear_kzg/util.rs rename {pcs/src => subroutines/src/pcs}/prelude.rs (84%) rename {pcs => subroutines/src/pcs}/readme.md (100%) rename {pcs/src => subroutines/src/pcs}/structs.rs (100%) rename {pcs/src => subroutines/src/pcs}/univariate_kzg/mod.rs (55%) rename {pcs/src => subroutines/src/pcs}/univariate_kzg/srs.rs (98%) rename {poly-iop/src => subroutines/src/poly_iop}/errors.rs (97%) rename poly-iop/src/lib.rs => subroutines/src/poly_iop/mod.rs (100%) rename {poly-iop/src => subroutines/src/poly_iop}/perm_check/mod.rs (94%) rename {poly-iop/src => subroutines/src/poly_iop}/perm_check/util.rs (97%) create mode 100644 subroutines/src/poly_iop/prelude.rs rename {poly-iop/src => subroutines/src/poly_iop}/prod_check/mod.rs (96%) rename {poly-iop/src => subroutines/src/poly_iop}/prod_check/util.rs (98%) rename {poly-iop => subroutines/src/poly_iop}/readme.md (100%) rename {poly-iop/src => subroutines/src/poly_iop}/structs.rs (100%) rename {poly-iop/src => subroutines/src/poly_iop}/sum_check/mod.rs (99%) rename {poly-iop/src => subroutines/src/poly_iop}/sum_check/prover.rs (64%) rename {poly-iop/src => subroutines/src/poly_iop}/sum_check/verifier.rs (99%) rename {poly-iop/src => subroutines/src/poly_iop}/utils.rs (100%) rename {poly-iop/src => subroutines/src/poly_iop}/zero_check/mod.rs (98%) diff --git a/Cargo.toml b/Cargo.toml index 31bc286..8371096 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ members = [ "arithmetic", "hyperplonk", - "poly-iop", + "subroutines", "transcript", "util" ] diff --git a/arithmetic/src/lib.rs b/arithmetic/src/lib.rs index 43576ec..6910908 100644 --- a/arithmetic/src/lib.rs +++ b/arithmetic/src/lib.rs @@ -6,10 +6,10 @@ mod virtual_polynomial; pub use errors::ArithErrors; pub use multilinear_polynomial::{ - evaluate_no_par, evaluate_opt, fix_first_variable, fix_variables, identity_permutation_mle, - merge_polynomials, random_mle_list, random_permutation_mle, random_zero_mle_list, - DenseMultilinearExtension, + evaluate_no_par, evaluate_opt, fix_last_variables, fix_last_variables_no_par, fix_variables, + identity_permutation_mle, merge_polynomials, random_mle_list, random_permutation_mle, + random_zero_mle_list, DenseMultilinearExtension, }; pub use univariate_polynomial::{build_l, get_uni_domain}; pub use util::{bit_decompose, gen_eval_point, get_batched_nv, get_index}; -pub use virtual_polynomial::{build_eq_x_r, VPAuxInfo, VirtualPolynomial}; +pub use virtual_polynomial::{build_eq_x_r, build_eq_x_r_vec, VPAuxInfo, VirtualPolynomial}; diff --git a/arithmetic/src/multilinear_polynomial.rs b/arithmetic/src/multilinear_polynomial.rs index 1bcef2b..a073819 100644 --- a/arithmetic/src/multilinear_polynomial.rs +++ b/arithmetic/src/multilinear_polynomial.rs @@ -123,36 +123,24 @@ pub fn fix_variables( DenseMultilinearExtension::::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) } -pub fn fix_first_variable( - poly: &DenseMultilinearExtension, - partial_point: &F, -) -> DenseMultilinearExtension { - assert!(poly.num_vars != 0, "invalid size of partial point"); - - let nv = poly.num_vars; - let res = fix_one_variable_helper(&poly.evaluations, nv, partial_point); - 
DenseMultilinearExtension::::from_evaluations_slice(nv - 1, &res) -} - fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { let mut res = vec![F::zero(); 1 << (nv - 1)]; - let one_minus_p = F::one() - point; // evaluate single variable of partial point from left to right #[cfg(not(feature = "parallel"))] - for b in 0..(1 << (nv - 1)) { - res[b] = data[b << 1] * one_minus_p + data[(b << 1) + 1] * point; + for i in 0..(1 << (nv - 1)) { + res[i] = data[i] + (data[(i << 1) + 1] - data[i << 1]) * point; } #[cfg(feature = "parallel")] if nv >= 13 { // on my computer we parallelization doesn't help till nv >= 13 res.par_iter_mut().enumerate().for_each(|(i, x)| { - *x = data[i << 1] * one_minus_p + data[(i << 1) + 1] * point; + *x = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; }); } else { - for b in 0..(1 << (nv - 1)) { - res[b] = data[b << 1] * one_minus_p + data[(b << 1) + 1] * point; + for i in 0..(1 << (nv - 1)) { + res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; } } @@ -178,9 +166,8 @@ fn fix_variables_no_par( // evaluate single variable of partial point from left to right for i in 1..dim + 1 { let r = partial_point[i - 1]; - let one_minus_r = F::one() - r; for b in 0..(1 << (nv - i)) { - poly[b] = poly[b << 1] * one_minus_r + poly[(b << 1) + 1] * r; + poly[b] = poly[b << 1] + (poly[(b << 1) + 1] - poly[b << 1]) * r; } } DenseMultilinearExtension::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) @@ -210,3 +197,71 @@ pub fn merge_polynomials( merged_nv, scalars, ))) } + +pub fn fix_last_variables_no_par( + poly: &DenseMultilinearExtension, + partial_point: &[F], +) -> DenseMultilinearExtension { + let mut res = fix_last_variable_no_par(poly, partial_point.last().unwrap()); + for p in partial_point.iter().rev().skip(1) { + res = fix_last_variable_no_par(&res, p); + } + res +} + +fn fix_last_variable_no_par( + poly: &DenseMultilinearExtension, + partial_point: &F, +) -> DenseMultilinearExtension { + let nv = poly.num_vars(); + let half_len = 1 << (nv - 1); + let mut res = vec![F::zero(); half_len]; + for (i, e) in res.iter_mut().enumerate().take(half_len) { + *e = poly.evaluations[i] + + *partial_point * (poly.evaluations[i + half_len] - poly.evaluations[i]); + } + DenseMultilinearExtension::from_evaluations_vec(nv - 1, res) +} +pub fn fix_last_variables( + poly: &DenseMultilinearExtension, + partial_point: &[F], +) -> DenseMultilinearExtension { + assert!( + partial_point.len() <= poly.num_vars, + "invalid size of partial point" + ); + let nv = poly.num_vars; + let mut poly = poly.evaluations.to_vec(); + let dim = partial_point.len(); + // evaluate single variable of partial point from left to right + for (i, point) in partial_point.iter().rev().enumerate().take(dim) { + poly = fix_last_variable_helper(&poly, nv - i, point); + } + + DenseMultilinearExtension::::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) +} + +fn fix_last_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { + let half_len = 1 << (nv - 1); + let mut res = vec![F::zero(); half_len]; + + // evaluate single variable of partial point from left to right + #[cfg(not(feature = "parallel"))] + for b in 0..half_len { + res[b] = data[b] + (data[b + half_len] - data[b]) * point; + } + + #[cfg(feature = "parallel")] + if nv >= 13 { + // on my computer we parallelization doesn't help till nv >= 13 + res.par_iter_mut().enumerate().for_each(|(i, x)| { + *x = data[i] + (data[i + half_len] - data[i]) * point; + }); + } else { + for b in 0..(1 << (nv - 1)) { 
+ res[b] = data[b] + (data[b + half_len] - data[b]) * point; + } + } + + res +} diff --git a/arithmetic/src/virtual_polynomial.rs b/arithmetic/src/virtual_polynomial.rs index bb6b44c..a6f1eb3 100644 --- a/arithmetic/src/virtual_polynomial.rs +++ b/arithmetic/src/virtual_polynomial.rs @@ -10,6 +10,7 @@ use ark_std::{ rand::{Rng, RngCore}, start_timer, }; +use rayon::prelude::*; use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, rc::Rc}; #[rustfmt::skip] @@ -324,16 +325,29 @@ impl VirtualPolynomial { } } -// This function build the eq(x, r) polynomial for any given r. -// -// Evaluate -// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) -// over r, which is -// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +/// This function build the eq(x, r) polynomial for any given r. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) pub fn build_eq_x_r( r: &[F], ) -> Result>, ArithErrors> { - let start = start_timer!(|| "zero check build eq_x_r"); + let evals = build_eq_x_r_vec(r)?; + let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals); + + Ok(Rc::new(mle)) +} +/// This function build the eq(x, r) polynomial for any given r, and output the +/// evaluation of eq(x, r) in its vector form. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +pub fn build_eq_x_r_vec(r: &[F]) -> Result, ArithErrors> { + let start = start_timer!(|| format!("build eq_x_r of size {}", r.len())); // we build eq(x,r) from its evaluations // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars @@ -349,11 +363,8 @@ pub fn build_eq_x_r( let mut eval = Vec::new(); build_eq_x_r_helper(r, &mut eval)?; - let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), eval); - - let res = Rc::new(mle); end_timer!(start); - Ok(res) + Ok(eval) } /// A helper function to build eq(x, r) recursively. @@ -373,13 +384,24 @@ fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), A // for the current step we will need // if x_0 = 0: (1-r0) * [b_1, ..., b_k] // if x_0 = 1: r0 * [b_1, ..., b_k] - - let mut res = vec![]; - for &b_i in buf.iter() { - let tmp = r[0] * b_i; - res.push(b_i - tmp); - res.push(tmp); - } + // let mut res = vec![]; + // for &b_i in buf.iter() { + // let tmp = r[0] * b_i; + // res.push(b_i - tmp); + // res.push(tmp); + // } + // *buf = res; + + let mut res = vec![F::zero(); buf.len() << 1]; + res.par_iter_mut().enumerate().for_each(|(i, val)| { + let bi = buf[i >> 1]; + let tmp = r[0] * bi; + if i & 1 == 0 { + *val = bi - tmp; + } else { + *val = tmp; + } + }); *buf = res; } diff --git a/bench_results/plot_component b/bench_results/plot_component new file mode 100644 index 0000000..a9eb5db --- /dev/null +++ b/bench_results/plot_component @@ -0,0 +1,51 @@ +filename = 'pie_chart.txt' +set terminal postscript eps enhanced color font "18" +set size square +set output "components.eps" + +rowi = 0 +rowf = 7 + +# obtain sum(column(2)) from rows `rowi` to `rowf` +set datafile separator ',' +stats filename u 2 every ::rowi::rowf noout prefix "A" + +# rowf should not be greater than length of file +rowf = (rowf-rowi > A_records - 1 ? 
A_records + rowi - 1 : rowf) + +angle(x)=x*360/A_sum +percentage(x)=x*100/A_sum + +# circumference dimensions for pie-chart +centerX=0 +centerY=0 +radius=1 + +# label positions +yposmin = 0.0 +yposmax = 0.95*radius +xpos = -0.8*radius +ypos(i) = -2.2*radius + yposmax - i*(yposmax-yposmin)/(1.0*rowf-rowi) + +#------------------------------------------------------------------- +# now we can configure the canvas +set style fill solid 1 # filled pie-chart +unset key # no automatic labels +unset tics # remove tics +unset border # remove borders; if some label is missing, comment to see what is happening + +set size ratio -1 # equal scale length +set xrange [-radius:3*radius] # [-1:2] leaves space for labels +set yrange [-3*radius:radius] # [-1:1] + +#------------------------------------------------------------------- +pos = 0 # init angle +colour = 0 # init colour + +# 1st line: plot pie-chart +# 2nd line: draw colored boxes at (xpos):(ypos) +# 3rd line: place labels at (xpos+offset):(ypos) +plot filename u (centerX):(centerY):(radius):(pos):(pos=pos+angle($2)):(colour=colour+1) every ::rowi::rowf w circle lc var,\ + for [i=0:rowf-rowi] '+' u (xpos):(ypos(i)) w p pt 5 ps 4 lc i+1,\ + for [i=0:rowf-rowi] filename u (xpos):(ypos(i)):(sprintf('%04.1f%% %s', percentage($2), stringcolumn(1))) every ::i+rowi::i+rowi w labels left offset 3,0 + \ No newline at end of file diff --git a/bench_results/plot_high_degree b/bench_results/plot_high_degree new file mode 100644 index 0000000..79c3211 --- /dev/null +++ b/bench_results/plot_high_degree @@ -0,0 +1,31 @@ +set terminal postscript eps enhanced color font "18" +filename = '64threads_growing_degree.txt' +set output "grow_degree.eps" + +# set font "32" + +set key left top +set grid +# set logscale y +# set logscale x + + +set title font ",64" +set key font ",18" +set xtics font ",20" +set ytics font ",20" +set xlabel font ",20" +set ylabel font ",20" + +# set key title "IOP proving time" +set key title font ", 20" +# set key title "2^{15} constraints" +set xlabel "degree d" +set ylabel 'time (us)' +# set yrange [] +# set xrange [500000:1100000] +# set xtics (0, 1,2,4,8,16,32) +plot filename using 1:2 w lp t "q_Lw_1 + q_Rw_2 + q_Mw_1^{d-1}w_2 + q_C = 0", + + +reset diff --git a/bench_results/plot_iop b/bench_results/plot_iop new file mode 100644 index 0000000..01e678a --- /dev/null +++ b/bench_results/plot_iop @@ -0,0 +1,59 @@ +set terminal postscript eps enhanced color font "18" +sumcheck = 'iop/sum_check.txt' +zerocheck = 'iop/zero_check.txt' +permcheck = 'iop/perm_check.txt' +prodcheck = 'iop/prod_check.txt' + +set output "iop_prover.eps" + +set font "64" + +set key left +set grid +set logscale y + +set title font ",64" +set key font ",18" +set xtics font ",20" +set ytics font ",20" +set xlabel font ",20" +set ylabel font ",20" + +set key title "IOP proving time" +set key title font ", 20" +set xlabel "\#variables" +set ylabel 'time (ms)' +# set xtics (4,8,16,32,64) +plot sumcheck using 1:2 w lp t "Sum Check",\ + zerocheck using 1:2 w lp t "Zero Check",\ + prodcheck using 1:2 w lp t "Prod Check",\ + permcheck using 1:2 w lp t "Perm Check", +reset + + +# set terminal postscript eps enhanced color +# sumcheck = 'iop/sum_check.txt' +# zerocheck = 'iop/zero_check.txt' +# permcheck = 'iop/perm_check.txt' +# prodcheck = 'iop/prod_check.txt' + +# set output "iop_verifier.eps" + +# set font "32" + +# set key left +# set grid +# set logscale y + +# set title font ",10" +# set key title "IOP verifier time" +# set xlabel "\#variables" +# set ylabel 'log time 
(us)' +# # set xtics (4,8,16,32,64) +# plot sumcheck using 1:3 w lp t "Sum Check",\ +# zerocheck using 1:3 w lp t "Zero Check",\ +# prodcheck using 1:3 w lp t "Prod Check",\ +# permcheck using 1:3 w lp t "Perm Check", +# reset + + diff --git a/bench_results/plot_msm b/bench_results/plot_msm new file mode 100644 index 0000000..abde419 --- /dev/null +++ b/bench_results/plot_msm @@ -0,0 +1,50 @@ +filename = 'msm_vs_eval.txt' +set terminal postscript eps enhanced color font "18" +set size square +set output "msm_vs_eval.eps" + +rowi = 0 +rowf = 3 + +# obtain sum(column(2)) from rows `rowi` to `rowf` +set datafile separator ',' +stats filename u 2 every ::rowi::rowf noout prefix "A" + +# rowf should not be greater than length of file +rowf = (rowf-rowi > A_records - 1 ? A_records + rowi - 1 : rowf) + +angle(x)=x*360/A_sum +percentage(x)=x*100/A_sum + +# circumference dimensions for pie-chart +centerX=0 +centerY=0 +radius=1 + +# label positions +yposmin = 0.0 +yposmax = 0.95*radius +xpos = -0.8*radius +ypos(i) = -2.2*radius + yposmax - i*(yposmax-yposmin)/(1.0*rowf-rowi) + +#------------------------------------------------------------------- +# now we can configure the canvas +set style fill solid 1 # filled pie-chart +unset key # no automatic labels +unset tics # remove tics +unset border # remove borders; if some label is missing, comment to see what is happening + +set size ratio -1 # equal scale length +set xrange [-radius:radius] # [-1:2] leaves space for labels +set yrange [-3*radius:radius] # [-1:1] + +#------------------------------------------------------------------- +pos = 0 # init angle +colour = 0 # init colour + +# 1st line: plot pie-chart +# 2nd line: draw colored boxes at (xpos):(ypos) +# 3rd line: place labels at (xpos+offset):(ypos) +plot filename u (centerX):(centerY):(radius):(pos):(pos=pos+angle($2)):(colour=colour+1) every ::rowi::rowf w circle lc var,\ + for [i=0:rowf-rowi] '+' u (xpos):(ypos(i)) w p pt 5 ps 4 lc i+1,\ + for [i=0:rowf-rowi] filename u (xpos):(ypos(i)):(sprintf('%05.2f%% %s', percentage($2), stringcolumn(1))) every ::i+rowi::i+rowi w labels left offset 3,0 \ No newline at end of file diff --git a/bench_results/plot_multi_thread b/bench_results/plot_multi_thread new file mode 100644 index 0000000..87c0f90 --- /dev/null +++ b/bench_results/plot_multi_thread @@ -0,0 +1,32 @@ +set terminal postscript eps enhanced color "18" +filename = 'degree_16_grow_threads_1006.txt' +set output "vanilla_multi_threads.eps" + +# set font "32" + +# set font "32" + +set key left +set grid +set logscale y + +set title font ",64" +set key font ",18" +set xtics font ",20" +set ytics font ",20" +set xlabel font ",20" +set ylabel font ",20" +set key title font ", 20" + +set key title "Multi-threading\n performance" +set xlabel "\#threads" +set ylabel 'time (us)' +plot filename using 1:2 w lp t "1 thread",\ + filename using 1:3 w lp t "2 threads",\ + filename using 1:4 w lp t "4 threads",\ + filename using 1:5 w lp t "8 threads",\ + filename using 1:6 w lp t "16 threads",\ + filename using 1:7 w lp t "32 threads",\ + + +reset diff --git a/bench_results/plot_spartan b/bench_results/plot_spartan new file mode 100644 index 0000000..cd5a9f6 --- /dev/null +++ b/bench_results/plot_spartan @@ -0,0 +1,55 @@ +set terminal postscript eps enhanced color font "18" +spartan = 'comparison/spartan.txt' +hyperplonk = 'comparison/hyperplonk.txt' +jellyfish = 'comparison/jellyfish.txt' + +set output "spartan_prover.eps" + +set title font ",64" +set key font ",18" +set xtics font ",20" +set ytics font 
",20" +set xlabel font ",20" +set ylabel font ",20" + +set key left +set grid +set logscale y + +set xrange [9:20] +# set title font ",10" +# set key title "Proving time" +set xlabel "log \# constraits" +set ylabel 'time (sec)' +# set xtics (4,8,16,32,64) +plot spartan using 1:2 w lp t "spartan",\ + jellyfish using 1:2 w lp t "jellyfish plonk",\ + hyperplonk using 1:2 w lp t "hyperplonk", +reset + +set terminal postscript eps enhanced color font "18" +ratio = 'comparison/ratio.txt' + +set output "ratio.eps" + + +set title font ",64" +set key font ",18" +set xtics font ",20" +set ytics font ",20" +set xlabel font ",20" +set ylabel font ",20" +# set font "32" + +set key left +set grid +# set logscale y + +set xrange [9:20] +set title font ",10" +# set key title "Proving time" +set xlabel "log \# constraits" +set ylabel 'ratio ' +# set xtics (4,8,16,32,64) +plot ratio using 1:6 w lp t "Jellyfish/Hyperplonk",\ + ratio using 1:7 w lp t "Spartan/Hyperplonk" diff --git a/hyperplonk/Cargo.toml b/hyperplonk/Cargo.toml index 042f99e..5045d17 100644 --- a/hyperplonk/Cargo.toml +++ b/hyperplonk/Cargo.toml @@ -15,8 +15,7 @@ ark-serialize = { version = "^0.3.0", default-features = false, features = [ "de displaydoc = { version = "0.2.3", default-features = false } -poly-iop = { path = "../poly-iop" } -pcs = { path = "../pcs" } +subroutines = { path = "../subroutines" } transcript = { path = "../transcript" } arithmetic = { path = "../arithmetic" } util = { path = "../util" } @@ -33,15 +32,14 @@ path = "benches/bench.rs" harness = false [features] -# default = [ ] -default = [ "parallel" ] +# default = [ ] +# default = [ "parallel" ] # default = [ "parallel", "print-trace" ] -# default = [ "parallel", "extensive_sanity_checks" ] +default = [ "parallel", "extensive_sanity_checks" ] bench = [ "parallel" ] # extensive sanity checks that are useful for debugging extensive_sanity_checks = [ - "poly-iop/extensive_sanity_checks", - "pcs/extensive_sanity_checks", + "subroutines/extensive_sanity_checks", ] parallel = [ "rayon", @@ -49,14 +47,13 @@ parallel = [ "ark-ff/parallel", "ark-poly/parallel", "ark-ec/parallel", - - "poly-iop/parallel", + "arithmetic/parallel", - "pcs/parallel", + "subroutines/parallel", "util/parallel" ] print-trace = [ "ark-std/print-trace", - "poly-iop/print-trace", "arithmetic/print-trace", + "subroutines/print-trace" ] \ No newline at end of file diff --git a/hyperplonk/benches/bench.rs b/hyperplonk/benches/bench.rs index d819230..d98981d 100644 --- a/hyperplonk/benches/bench.rs +++ b/hyperplonk/benches/bench.rs @@ -7,36 +7,45 @@ use hyperplonk::{ prelude::{CustomizedGates, HyperPlonkErrors, MockCircuit}, HyperPlonkSNARK, }; -use pcs::{ - prelude::{MultilinearKzgPCS, MultilinearUniversalParams, UnivariateUniversalParams}, - PolynomialCommitmentScheme, -}; -use poly_iop::PolyIOP; use rayon::ThreadPoolBuilder; +use subroutines::{ + pcs::{ + prelude::{MultilinearKzgPCS, MultilinearUniversalParams}, + PolynomialCommitmentScheme, + }, + poly_iop::PolyIOP, +}; + +const SUPPORTED_SIZE: usize = 20; +const MIN_NUM_VARS: usize = 8; +const MAX_NUM_VARS: usize = 15; +const MIN_CUSTOM_DEGREE: usize = 1; +const MAX_CUSTOM_DEGREE: usize = 32; fn main() -> Result<(), HyperPlonkErrors> { let args: Vec = env::args().collect(); - let thread = args[1].parse().unwrap_or(12); - + let thread = args[1].parse().unwrap_or(24); + let mut rng = test_rng(); + let pcs_srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, SUPPORTED_SIZE)?; ThreadPoolBuilder::new() .num_threads(thread) .build_global() .unwrap(); - 
bench_vanilla_plonk(thread)?; - for degree in [1, 2, 4, 8, 16, 32] { - bench_high_degree_plonk(degree, thread)?; + bench_vanilla_plonk(&pcs_srs, thread)?; + for degree in MIN_CUSTOM_DEGREE..MAX_CUSTOM_DEGREE { + bench_high_degree_plonk(&pcs_srs, degree, thread)?; } Ok(()) } -fn bench_vanilla_plonk(thread: usize) -> Result<(), HyperPlonkErrors> { - let mut rng = test_rng(); - let pcs_srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 22)?; - - let filename = format!("vanilla nv {}.txt", thread); +fn bench_vanilla_plonk( + pcs_srs: &MultilinearUniversalParams, + thread: usize, +) -> Result<(), HyperPlonkErrors> { + let filename = format!("vanilla threads {}.txt", thread); let mut file = File::create(filename).unwrap(); - for nv in 1..16 { + for nv in MIN_NUM_VARS..MAX_NUM_VARS { let vanilla_gate = CustomizedGates::vanilla_plonk_gate(); bench_mock_circuit_zkp_helper(&mut file, nv, &vanilla_gate, &pcs_srs)?; } @@ -44,14 +53,15 @@ fn bench_vanilla_plonk(thread: usize) -> Result<(), HyperPlonkErrors> { Ok(()) } -fn bench_high_degree_plonk(degree: usize, thread: usize) -> Result<(), HyperPlonkErrors> { - let mut rng = test_rng(); - let pcs_srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 22)?; - +fn bench_high_degree_plonk( + pcs_srs: &MultilinearUniversalParams, + degree: usize, + thread: usize, +) -> Result<(), HyperPlonkErrors> { let filename = format!("high degree {} thread {}.txt", degree, thread); let mut file = File::create(filename).unwrap(); - for nv in 1..16 { - let vanilla_gate = CustomizedGates::vanilla_plonk_gate(); + for nv in MIN_NUM_VARS..MAX_NUM_VARS { + let vanilla_gate = CustomizedGates::mock_gate(2, degree); bench_mock_circuit_zkp_helper(&mut file, nv, &vanilla_gate, &pcs_srs)?; } @@ -62,17 +72,14 @@ fn bench_mock_circuit_zkp_helper( file: &mut File, nv: usize, gate: &CustomizedGates, - pcs_srs: &( - MultilinearUniversalParams, - UnivariateUniversalParams, - ), + pcs_srs: &MultilinearUniversalParams, ) -> Result<(), HyperPlonkErrors> { let repetition = if nv < 10 { - 10 - } else if nv < 20 { 5 - } else { + } else if nv < 20 { 2 + } else { + 1 }; //========================================================== @@ -120,7 +127,7 @@ fn bench_mock_circuit_zkp_helper( )?; } let t = start.elapsed().as_micros() / repetition as u128; - println!("proving for {} variables: {} us", nv, t); + file.write_all(format!("{} {}\n", nv, t).as_ref()).unwrap(); let proof = as HyperPlonkSNARK>>::prove( diff --git a/hyperplonk/src/custom_gate.rs b/hyperplonk/src/custom_gate.rs index 1586963..1683afb 100644 --- a/hyperplonk/src/custom_gate.rs +++ b/hyperplonk/src/custom_gate.rs @@ -145,14 +145,43 @@ impl CustomizedGates { pub fn mock_gate(num_witness: usize, degree: usize) -> Self { let mut gates = vec![]; - let high_degree_term = vec![0; degree]; + let mut high_degree_term = vec![0; degree - 1]; + high_degree_term.push(1); gates.push((1, Some(0), high_degree_term)); - for i in 1..num_witness { - gates.push((1, Some(i), vec![i])) + for i in 0..num_witness { + gates.push((1, Some(i + 1), vec![i])) } - gates.push((1, Some(num_witness), vec![])); + gates.push((1, Some(num_witness + 1), vec![])); CustomizedGates { gates } } + + /// Return a plonk gate where #selector > #witness * 2 + /// ``` ignore + /// q_1 w_1 + q_2 w_2 + q_3 w_3 + + /// q_4 w1w2 + q_5 w1w3 + q_6 w2w3 + + /// q_7 = 0 + /// ``` + /// which is + /// ``` ignore + /// (1, Some(id_qL), vec![id_W1]), + /// (1, Some(id_qR), vec![id_W2]), + /// (1, Some(id_qO), vec![id_W3]), + /// (1, Some(id_qM), vec![id_W1, id_w2]), + /// (1, 
Some(id_qC), vec![]), + /// ``` + pub fn super_long_selector_gate() -> Self { + Self { + gates: vec![ + (1, Some(0), vec![0]), + (1, Some(1), vec![1]), + (1, Some(2), vec![2]), + (1, Some(3), vec![0, 1]), + (1, Some(4), vec![0, 2]), + (1, Some(5), vec![1, 2]), + (1, Some(6), vec![]), + ], + } + } } diff --git a/hyperplonk/src/errors.rs b/hyperplonk/src/errors.rs index 83a17a3..15e9482 100644 --- a/hyperplonk/src/errors.rs +++ b/hyperplonk/src/errors.rs @@ -4,8 +4,7 @@ use arithmetic::ArithErrors; use ark_serialize::SerializationError; use ark_std::string::String; use displaydoc::Display; -use pcs::prelude::PCSError; -use poly_iop::prelude::PolyIOPErrors; +use subroutines::{pcs::prelude::PCSError, poly_iop::prelude::PolyIOPErrors}; use transcript::TranscriptError; /// A `enum` specifying the possible failure modes of hyperplonk. diff --git a/hyperplonk/src/lib.rs b/hyperplonk/src/lib.rs index 4df019e..a111fe1 100644 --- a/hyperplonk/src/lib.rs +++ b/hyperplonk/src/lib.rs @@ -2,8 +2,7 @@ use ark_ec::PairingEngine; use errors::HyperPlonkErrors; -use pcs::prelude::PolynomialCommitmentScheme; -use poly_iop::prelude::PermutationCheck; +use subroutines::{pcs::prelude::PolynomialCommitmentScheme, poly_iop::prelude::PermutationCheck}; use witness::WitnessColumn; mod custom_gate; diff --git a/hyperplonk/src/mock.rs b/hyperplonk/src/mock.rs index 7539963..0833ff2 100644 --- a/hyperplonk/src/mock.rs +++ b/hyperplonk/src/mock.rs @@ -135,11 +135,18 @@ mod test { use super::*; use crate::{errors::HyperPlonkErrors, HyperPlonkSNARK}; use ark_bls12_381::{Bls12_381, Fr}; - use pcs::{ - prelude::{MultilinearKzgPCS, MultilinearUniversalParams, UnivariateUniversalParams}, - PolynomialCommitmentScheme, + use subroutines::{ + pcs::{ + prelude::{MultilinearKzgPCS, MultilinearUniversalParams}, + PolynomialCommitmentScheme, + }, + poly_iop::PolyIOP, }; - use poly_iop::PolyIOP; + + const SUPPORTED_SIZE: usize = 20; + const MIN_NUM_VARS: usize = 8; + const MAX_NUM_VARS: usize = 15; + const CUSTOM_DEGREE: [usize; 6] = [1, 2, 4, 8, 16, 32]; #[test] fn test_mock_circuit_sat() { @@ -153,7 +160,7 @@ mod test { assert!(circuit.is_satisfied()); for num_witness in 2..10 { - for degree in 1..10 { + for degree in CUSTOM_DEGREE { let mock_gate = CustomizedGates::mock_gate(num_witness, degree); let circuit = MockCircuit::::new(1 << i, &mock_gate); assert!(circuit.is_satisfied()); @@ -165,10 +172,7 @@ mod test { fn test_mock_circuit_zkp_helper( nv: usize, gate: &CustomizedGates, - pcs_srs: &( - MultilinearUniversalParams, - UnivariateUniversalParams, - ), + pcs_srs: &MultilinearUniversalParams, ) -> Result<(), HyperPlonkErrors> { let circuit = MockCircuit::::new(1 << nv, gate); assert!(circuit.is_satisfied()); @@ -201,18 +205,19 @@ mod test { #[test] fn test_mock_circuit_zkp() -> Result<(), HyperPlonkErrors> { let mut rng = test_rng(); - let pcs_srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 16)?; - for nv in 1..10 { + let pcs_srs = + MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, SUPPORTED_SIZE)?; + for nv in MIN_NUM_VARS..MAX_NUM_VARS { let vanilla_gate = CustomizedGates::vanilla_plonk_gate(); test_mock_circuit_zkp_helper(nv, &vanilla_gate, &pcs_srs)?; } - for nv in 1..10 { + for nv in MIN_NUM_VARS..MAX_NUM_VARS { let tubro_gate = CustomizedGates::jellyfish_turbo_plonk_gate(); test_mock_circuit_zkp_helper(nv, &tubro_gate, &pcs_srs)?; } let nv = 5; for num_witness in 2..10 { - for degree in [1, 2, 4, 8, 16] { + for degree in CUSTOM_DEGREE { let mock_gate = CustomizedGates::mock_gate(num_witness, degree); 
test_mock_circuit_zkp_helper(nv, &mock_gate, &pcs_srs)?; } @@ -224,12 +229,26 @@ mod test { #[test] fn test_mock_circuit_e2e() -> Result<(), HyperPlonkErrors> { let mut rng = test_rng(); - let pcs_srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 23)?; - let nv = 18; + let pcs_srs = + MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, SUPPORTED_SIZE)?; + let nv = MAX_NUM_VARS; let vanilla_gate = CustomizedGates::vanilla_plonk_gate(); test_mock_circuit_zkp_helper(nv, &vanilla_gate, &pcs_srs)?; Ok(()) } + + #[test] + fn test_mock_long_selector_e2e() -> Result<(), HyperPlonkErrors> { + let mut rng = test_rng(); + let pcs_srs = + MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, SUPPORTED_SIZE)?; + let nv = MAX_NUM_VARS; + + let long_selector_gate = CustomizedGates::super_long_selector_gate(); + test_mock_circuit_zkp_helper(nv, &long_selector_gate, &pcs_srs)?; + + Ok(()) + } } diff --git a/hyperplonk/src/snark.rs b/hyperplonk/src/snark.rs index b0283be..7f46a1c 100644 --- a/hyperplonk/src/snark.rs +++ b/hyperplonk/src/snark.rs @@ -5,18 +5,19 @@ use crate::{ witness::WitnessColumn, HyperPlonkSNARK, }; -use arithmetic::{ - evaluate_opt, gen_eval_point, identity_permutation_mle, merge_polynomials, VPAuxInfo, -}; +use arithmetic::{evaluate_opt, identity_permutation_mle, merge_polynomials, VPAuxInfo}; use ark_ec::PairingEngine; use ark_poly::DenseMultilinearExtension; use ark_std::{end_timer, log2, start_timer, One, Zero}; -use pcs::prelude::{compute_qx_degree, PolynomialCommitmentScheme}; -use poly_iop::{ - prelude::{PermutationCheck, ZeroCheck}, - PolyIOP, +use std::{marker::PhantomData, rc::Rc}; +use subroutines::{ + pcs::prelude::{Commitment, PolynomialCommitmentScheme}, + poly_iop::{ + prelude::{PermutationCheck, ZeroCheck}, + PolyIOP, + }, + BatchProof, }; -use std::{cmp::max, marker::PhantomData, rc::Rc}; use transcript::IOPTranscript; impl HyperPlonkSNARK for PolyIOP @@ -30,6 +31,8 @@ where Polynomial = Rc>, Point = Vec, Evaluation = E::Fr, + Commitment = Commitment, + BatchProof = BatchProof, >, { type Index = HyperPlonkIndex; @@ -44,29 +47,16 @@ where let num_vars = index.num_variables(); let log_num_witness_polys = log2(index.num_witness_columns()) as usize; - let log_num_selector_polys = log2(index.num_selector_columns()) as usize; - let witness_merged_nv = num_vars + log_num_witness_polys; - let selector_merged_nv = num_vars + log_num_selector_polys; - - let max_nv = max(witness_merged_nv + 1, selector_merged_nv); - let max_points = max( - // prod(x) has 5 points - 5, - max( - // selector points - index.num_selector_columns(), - // witness points + public input point + perm point - index.num_witness_columns() + 2, - ), - ); - let supported_uni_degree = compute_qx_degree(max_nv, max_points); - let supported_ml_degree = max_nv; + let log_chunk_size = log_num_witness_polys + 1; + let prod_x_nv = num_vars + log_chunk_size; + + let supported_ml_degree = prod_x_nv; // extract PCS prover and verifier keys from SRS let (pcs_prover_param, pcs_verifier_param) = - PCS::trim(pcs_srs, supported_uni_degree, Some(supported_ml_degree))?; + PCS::trim(pcs_srs, None, Some(supported_ml_degree))?; // build permutation oracles let permutation_oracle = Rc::new(DenseMultilinearExtension::from_evaluations_slice( @@ -82,22 +72,27 @@ where .map(|s| Rc::new(DenseMultilinearExtension::from(s))) .collect(); - let selector_merged = merge_polynomials(&selector_oracles)?; - let selector_com = PCS::commit(&pcs_prover_param, &selector_merged)?; + let selector_commitments = selector_oracles + .iter() + 
.map(|poly| PCS::commit(&pcs_prover_param, poly)) + .collect::, _>>()?; + + // let selector_merged = merge_polynomials(&selector_oracles)?; + // let selector_com = PCS::commit(&pcs_prover_param, &selector_merged)?; Ok(( Self::ProvingKey { params: index.params.clone(), permutation_oracle: permutation_oracle.clone(), selector_oracles, - selector_com: selector_com.clone(), + selector_commitments: selector_commitments.clone(), pcs_param: pcs_prover_param, }, Self::VerifyingKey { params: index.params.clone(), permutation_oracle, pcs_param: pcs_verifier_param, - selector_com, + selector_commitments, perm_com, }, )) @@ -163,9 +158,13 @@ where // witness assignment of length 2^n let num_vars = pk.params.num_variables(); let log_num_witness_polys = log2(pk.params.num_witness_columns()) as usize; - let log_num_selector_polys = log2(pk.params.num_selector_columns()) as usize; - // number of variables in merged polynomial for Multilinear-KZG let merged_nv = num_vars + log_num_witness_polys; + + // number of nv in prod(x) which is supposed to be the cap + // so each chunk we we store maximum 1 << (prod_x_nv - num_var) selectors + let log_chunk_size = log_num_witness_polys + 1; + let prod_x_nv = num_vars + log_chunk_size; + // online public input of length 2^\ell let ell = log2(pk.params.num_pub_input) as usize; @@ -176,23 +175,31 @@ where // - prod(x) // - selectors // - // Accumulator for w_merged and its points - let mut w_merged_pcs_acc = PcsAccumulator::::new(); - // Accumulator for prod(x) and its points - let mut prod_pcs_acc = PcsAccumulator::::new(); - // Accumulator for prod(x) and its points - let mut selector_pcs_acc = PcsAccumulator::::new(); - - let witness_polys: Vec>> = witnesses - .iter() - .map(|w| Rc::new(DenseMultilinearExtension::from(w))) - .collect(); + // Accumulator's nv is bounded by prod(x) that means + // we need to split the selectors into multiple chunks if + // #selectors > chunk_size + // let mut pcs_acc = PcsAccumulator::::new(prod_x_nv); + let mut prod_x_pcs_acc = PcsAccumulator::::new(prod_x_nv); + let mut witness_and_selector_x_pcs_acc = PcsAccumulator::::new(num_vars); // ======================================================================= // 1. Commit Witness polynomials `w_i(x)` and append commitment to // transcript // ======================================================================= let step = start_timer!(|| "commit witnesses"); + + let witness_polys: Vec>> = witnesses + .iter() + .map(|w| Rc::new(DenseMultilinearExtension::from(w))) + .collect(); + + let witness_commits = witness_polys + .iter() + .map(|x| PCS::commit(&pk.pcs_param, x).unwrap()) + .collect::>(); + + // merge all witness into a single MLE - we will run perm check on it + // to obtain prod(x) let w_merged = merge_polynomials(&witness_polys)?; if w_merged.num_vars != merged_nv { return Err(HyperPlonkErrors::InvalidParameters(format!( @@ -200,9 +207,11 @@ where w_merged.num_vars, merged_nv ))); } - let w_merged_com = PCS::commit(&pk.pcs_param, &w_merged)?; - w_merged_pcs_acc.init_poly(w_merged.clone(), w_merged_com.clone())?; - transcript.append_serializable_element(b"w", &w_merged_com)?; + + // TODO: we'll remove one of witness_merged_commit and witness_commits later. 
+ let witness_merged_commit = PCS::commit(&pk.pcs_param, &w_merged)?; + transcript.append_serializable_element(b"w", &witness_merged_commit)?; + end_timer!(step); // ======================================================================= // 2 Run ZeroCheck on @@ -266,7 +275,6 @@ where // 4.1 (deferred) open prod(0,x), prod(1, x), prod(x, 0), prod(x, 1) // perm_check_point - prod_pcs_acc.init_poly(prod_x, perm_check_proof.prod_x_comm.clone())?; // prod(0, x) let tmp_point1 = [perm_check_point.as_slice(), &[E::Fr::zero()]].concat(); // prod(1, x) @@ -275,145 +283,77 @@ where let tmp_point3 = [&[E::Fr::zero()], perm_check_point.as_slice()].concat(); // prod(x, 1) let tmp_point4 = [&[E::Fr::one()], perm_check_point.as_slice()].concat(); - // prod(1, ..., 1, 0) - let tmp_point5 = [vec![E::Fr::zero()], vec![E::Fr::one(); merged_nv]].concat(); + // // prod(1, ..., 1, 0) + // let tmp_point5 = [vec![E::Fr::zero()], vec![E::Fr::one(); + // merged_nv]].concat(); - prod_pcs_acc.insert_point(&tmp_point1); - prod_pcs_acc.insert_point(&tmp_point2); - prod_pcs_acc.insert_point(&tmp_point3); - prod_pcs_acc.insert_point(&tmp_point4); - prod_pcs_acc.insert_point(&tmp_point5); + prod_x_pcs_acc.insert_poly_and_points(&prod_x, &perm_check_proof.prod_x_comm, &tmp_point1); + prod_x_pcs_acc.insert_poly_and_points(&prod_x, &perm_check_proof.prod_x_comm, &tmp_point2); + prod_x_pcs_acc.insert_poly_and_points(&prod_x, &perm_check_proof.prod_x_comm, &tmp_point3); + prod_x_pcs_acc.insert_poly_and_points(&prod_x, &perm_check_proof.prod_x_comm, &tmp_point4); // 4.2 permutation check - // - 4.2.1. (deferred) wi_poly(perm_check_point) - w_merged_pcs_acc.insert_point(perm_check_point); - - #[cfg(feature = "extensive_sanity_checks")] - { - // sanity check - let eval = pk - .permutation_oracle - .evaluate(&perm_check_proof.zero_check_proof.point) - .ok_or_else(|| { - HyperPlonkErrors::InvalidParameters( - "perm_oracle evaluation dimension does not match".to_string(), - ) - })?; - if eval != perm_oracle_eval { - return Err(HyperPlonkErrors::InvalidProver( - "perm_oracle evaluation is different from PCS opening".to_string(), - )); - } - } + // - 4.2.1. wi_poly(perm_check_point) + let (perm_check_opening, perm_check_eval) = + PCS::open(&pk.pcs_param, &w_merged, perm_check_point)?; // - 4.3. zero check evaluations and proofs // - 4.3.1 (deferred) wi_poly(zero_check_point) for i in 0..witness_polys.len() { - let tmp_point = gen_eval_point(i, log_num_witness_polys, &zero_check_proof.point); - // Deferred opening zero check proof - w_merged_pcs_acc.insert_point(&tmp_point); + witness_and_selector_x_pcs_acc.insert_poly_and_points( + &witness_polys[i], + &witness_commits[i], + &zero_check_proof.point, + ); } // - 4.3.2. (deferred) selector_poly(zero_check_point) - let selector_merged = merge_polynomials(&pk.selector_oracles)?; - selector_pcs_acc.init_poly(selector_merged, pk.selector_com.clone())?; - for i in 0..pk.selector_oracles.len() { - let tmp_point = gen_eval_point(i, log_num_selector_polys, &zero_check_proof.point); - // Deferred opening zero check proof - selector_pcs_acc.insert_point(&tmp_point); - } + pk.selector_oracles + .iter() + .zip(pk.selector_commitments.iter()) + .for_each(|(poly, com)| { + witness_and_selector_x_pcs_acc.insert_poly_and_points( + poly, + com, + &zero_check_proof.point, + ) + }); // - 4.4. 
public input consistency checks // - pi_poly(r_pi) where r_pi is sampled from transcript let r_pi = transcript.get_and_append_challenge_vectors(b"r_pi", ell)?; - let tmp_point = [ - vec![E::Fr::zero(); num_vars - ell], - r_pi, - vec![E::Fr::zero(); log_num_witness_polys], - ] - .concat(); - w_merged_pcs_acc.insert_point(&tmp_point); - - #[cfg(feature = "extensive_sanity_checks")] - { - // sanity check - let pi_poly = Rc::new(DenseMultilinearExtension::from_evaluations_slice( - ell, pub_input, - )); - - let eval = pi_poly.evaluate(&r_pi).ok_or_else(|| { - HyperPlonkErrors::InvalidParameters( - "public input evaluation dimension does not match".to_string(), - ) - })?; - if eval != pi_eval { - return Err(HyperPlonkErrors::InvalidProver( - "public input evaluation is different from PCS opening".to_string(), - )); - } - } + let tmp_point = [vec![E::Fr::zero(); num_vars - ell], r_pi].concat(); + witness_and_selector_x_pcs_acc.insert_poly_and_points( + &witness_polys[0], + &witness_commits[0], + &tmp_point, + ); end_timer!(step); // ======================================================================= // 5. deferred batch opening // ======================================================================= - let step = start_timer!(|| "deferred batch openings"); - let sub_step = start_timer!(|| "open witness"); - let (w_merged_batch_opening, w_merged_batch_evals) = - w_merged_pcs_acc.batch_open(&pk.pcs_param)?; - end_timer!(sub_step); - - let sub_step = start_timer!(|| "open prod(x)"); - let (prod_batch_openings, prod_batch_evals) = prod_pcs_acc.batch_open(&pk.pcs_param)?; - end_timer!(sub_step); - - let sub_step = start_timer!(|| "open selector"); - let (selector_batch_opening, selector_batch_evals) = - selector_pcs_acc.batch_open(&pk.pcs_param)?; - end_timer!(sub_step); + let step = start_timer!(|| "deferred batch openings prod(x)"); + let batch_prod_x_openings = prod_x_pcs_acc.multi_open(&pk.pcs_param, &mut transcript)?; end_timer!(step); + + let step = start_timer!(|| "deferred batch openings witness and selectors"); + let batch_witness_and_selector_openings = + witness_and_selector_x_pcs_acc.multi_open(&pk.pcs_param, &mut transcript)?; + end_timer!(step); + end_timer!(start); Ok(HyperPlonkProof { - // ======================================================================= - // witness related - // ======================================================================= - /// PCS commit for witnesses - w_merged_com, - // Batch opening for witness commitment - // - PermCheck eval: 1 point - // - ZeroCheck evals: #witness points - // - public input eval: 1 point - w_merged_batch_opening, - // Evaluations of Witness - // - PermCheck eval: 1 point - // - ZeroCheck evals: #witness points - // - public input eval: 1 point - w_merged_batch_evals, - // ======================================================================= - // prod(x) related - // ======================================================================= - // prod(x)'s openings - // - prod(0, x), - // - prod(1, x), - // - prod(x, 0), - // - prod(x, 1), - // - prod(1, ..., 1,0) - prod_batch_openings, - // prod(x)'s evaluations - // - prod(0, x), - // - prod(1, x), - // - prod(x, 0), - // - prod(x, 1), - // - prod(1, ..., 1,0) - prod_batch_evals, - // ======================================================================= - // selectors related - // ======================================================================= - // PCS openings for selectors on zero check point - selector_batch_opening, - // Evaluates of selectors on zero check 
point - selector_batch_evals, + // PCS commit for witnesses + witness_merged_commit, + witness_commits, + // batch_openings, + batch_prod_x_openings, + batch_witness_and_selector_openings, + // perm check openings + perm_check_opening, + perm_check_eval, // ======================================================================= // IOP proofs // ======================================================================= @@ -460,15 +400,38 @@ where let start = start_timer!(|| "hyperplonk verification"); let mut transcript = IOPTranscript::::new(b"hyperplonk"); + + let num_selectors = vk.params.num_selector_columns(); + let num_witnesses = vk.params.num_witness_columns(); + // witness assignment of length 2^n + let log_num_witness_polys = log2(num_witnesses) as usize; let num_vars = vk.params.num_variables(); - let log_num_witness_polys = log2(vk.params.num_witness_columns()) as usize; // number of variables in merged polynomial for Multilinear-KZG let merged_nv = num_vars + log_num_witness_polys; // online public input of length 2^\ell let ell = log2(vk.params.num_pub_input) as usize; + // sequence: + // - prod(x) at 5 points + // - w_merged at perm check point + // - w_merged at zero check points (#witness points) + // - selector_merged at zero check points (#selector points) + // - w[0] at r_pi + let selector_evals = &proof + .batch_witness_and_selector_openings + .f_i_eval_at_point_i[num_witnesses..num_witnesses + num_selectors]; + let witness_evals = &proof + .batch_witness_and_selector_openings + .f_i_eval_at_point_i[..num_witnesses]; + let prod_evals = &proof.batch_prod_x_openings.f_i_eval_at_point_i[0..4]; + let pi_eval = proof + .batch_witness_and_selector_openings + .f_i_eval_at_point_i + .last() + .unwrap(); + let pi_poly = DenseMultilinearExtension::from_evaluations_slice(ell as usize, pub_input); // ======================================================================= @@ -482,27 +445,6 @@ where 1 << ell ))); } - if proof.selector_batch_evals.len() - 1 != vk.params.num_selector_columns() { - return Err(HyperPlonkErrors::InvalidVerifier(format!( - "Selector length is not correct: got {}, expect {}", - proof.selector_batch_evals.len() - 1, - 1 << vk.params.num_selector_columns() - ))); - } - if proof.w_merged_batch_evals.len() != vk.params.num_witness_columns() + 3 { - return Err(HyperPlonkErrors::InvalidVerifier(format!( - "Witness length is not correct: got {}, expect {}", - proof.w_merged_batch_evals.len() - 3, - vk.params.num_witness_columns() - ))); - } - if proof.prod_batch_evals.len() - 1 != 5 { - return Err(HyperPlonkErrors::InvalidVerifier(format!( - "the number of product polynomial evaluations is not correct: got {}, expect {}", - proof.prod_batch_evals.len() - 1, - 5 - ))); - } // ======================================================================= // 1. 
Verify zero_check_proof on @@ -522,7 +464,7 @@ where phantom: PhantomData::default(), }; // push witness to transcript - transcript.append_serializable_element(b"w", &proof.w_merged_com)?; + transcript.append_serializable_element(b"w", &proof.witness_merged_commit)?; let zero_check_sub_claim = >::verify( &proof.zero_check_proof, @@ -533,11 +475,7 @@ where let zero_check_point = &zero_check_sub_claim.point; // check zero check subclaim - let f_eval = eval_f( - &vk.params.gate_func, - &proof.selector_batch_evals[..vk.params.num_selector_columns()], - &proof.w_merged_batch_evals[1..], - )?; + let f_eval = eval_f(&vk.params.gate_func, selector_evals, witness_evals)?; if f_eval != zero_check_sub_claim.expected_evaluation { return Err(HyperPlonkErrors::InvalidProof( "zero check evaluation failed".to_string(), @@ -592,12 +530,10 @@ where let s_id_eval = evaluate_opt(&s_id, perm_check_point); let s_perm_eval = evaluate_opt(&vk.permutation_oracle, perm_check_point); - let q_x_rec = proof.prod_batch_evals[1] - - proof.prod_batch_evals[2] * proof.prod_batch_evals[3] + let q_x_rec = prod_evals[1] - prod_evals[2] * prod_evals[3] + alpha - * ((proof.w_merged_batch_evals[0] + beta * s_perm_eval + gamma) - * proof.prod_batch_evals[0] - - (proof.w_merged_batch_evals[0] + beta * s_id_eval + gamma)); + * ((prod_evals[0] + beta * s_perm_eval + gamma) * prod_evals[0] + - (prod_evals[0] + beta * s_id_eval + gamma)); if q_x_rec != perm_check_sub_claim @@ -619,84 +555,66 @@ where // ======================================================================= // 3.1 open prod(x)' evaluations // ======================================================================= - let prod_final_query = perm_check_sub_claim.product_check_sub_claim.final_query; - let points = [ + // TODO: Check prod(x) at (1,...,1,0) + let _prod_final_query = perm_check_sub_claim.product_check_sub_claim.final_query; + let prod_points = [ [perm_check_point.as_slice(), &[E::Fr::zero()]].concat(), [perm_check_point.as_slice(), &[E::Fr::one()]].concat(), [&[E::Fr::zero()], perm_check_point.as_slice()].concat(), [&[E::Fr::one()], perm_check_point.as_slice()].concat(), - prod_final_query.0, + // prod_final_query.0, ]; - if !PCS::batch_verify_single_poly( + let mut r_pi = transcript.get_and_append_challenge_vectors(b"r_pi", ell)?; + + let res = PCS::batch_verify( &vk.pcs_param, - &proof.perm_check_proof.prod_x_comm, - &points, - &proof.prod_batch_evals, - &proof.prod_batch_openings, - )? { - return Err(HyperPlonkErrors::InvalidProof( - "prod(0, x) pcs verification failed".to_string(), - )); - } + [proof.perm_check_proof.prod_x_comm; 4].as_ref(), + prod_points.as_ref(), + &proof.batch_prod_x_openings, + &mut transcript, + )?; + assert!(res); // ======================================================================= - // 3.2 open selectors' evaluations + // 3.3 open witnesses' and selectors evaluations // ======================================================================= - let log_num_selector_polys = log2(vk.params.num_selector_columns()) as usize; - let mut points = vec![]; - for i in 0..vk.params.num_selector_columns() { - let tmp_point = - gen_eval_point(i, log_num_selector_polys, &proof.zero_check_proof.point); - points.push(tmp_point); - } - if !PCS::batch_verify_single_poly( + let res = PCS::verify( &vk.pcs_param, - &vk.selector_com, - &points, - &proof.selector_batch_evals, - &proof.selector_batch_opening, - )? 
{ - return Err(HyperPlonkErrors::InvalidProof( - "selector pcs verification failed".to_string(), - )); - } + &proof.witness_merged_commit, + perm_check_point, + &proof.perm_check_eval, + &proof.perm_check_opening, + )?; + assert!(res); - // ======================================================================= - // 3.2 open witnesses' evaluations - // ======================================================================= - let mut r_pi = transcript.get_and_append_challenge_vectors(b"r_pi", ell)?; - let pi_eval = evaluate_opt(&pi_poly, &r_pi); - assert_eq!( - pi_eval, - proof.w_merged_batch_evals[proof.w_merged_batch_evals.len() - 2] - ); + let pi_eval_rec = evaluate_opt(&pi_poly, &r_pi); + assert_eq!(&pi_eval_rec, pi_eval); - r_pi = [ - vec![E::Fr::zero(); num_vars - ell], - r_pi, - vec![E::Fr::zero(); log_num_witness_polys], + r_pi = [vec![E::Fr::zero(); num_vars - ell], r_pi].concat(); + let commitments = [ + proof.witness_commits.as_slice(), + vk.selector_commitments.as_slice(), + &[proof.witness_commits[0]], ] .concat(); - let mut points = vec![perm_check_point.clone()]; + let points = [ + vec![zero_check_point.clone(); num_witnesses + num_selectors].as_slice(), + &[r_pi], + ] + .concat(); - for i in 0..proof.w_merged_batch_evals.len() - 3 { - points.push(gen_eval_point(i, log_num_witness_polys, zero_check_point)) - } - points.push(r_pi); - if !PCS::batch_verify_single_poly( + let res = PCS::batch_verify( &vk.pcs_param, - &proof.w_merged_com, - &points, - &proof.w_merged_batch_evals, - &proof.w_merged_batch_opening, - )? { - return Err(HyperPlonkErrors::InvalidProof( - "witness for permutation check pcs verification failed".to_string(), - )); - } + commitments.as_ref(), + points.as_ref(), + &proof.batch_witness_and_selector_openings, + &mut transcript, + )?; + + assert!(res); end_timer!(step); end_timer!(start); @@ -714,7 +632,7 @@ mod tests { use arithmetic::random_permutation_mle; use ark_bls12_381::Bls12_381; use ark_std::test_rng; - use pcs::prelude::MultilinearKzgPCS; + use subroutines::pcs::prelude::MultilinearKzgPCS; #[test] fn test_hyperplonk_e2e() -> Result<(), HyperPlonkErrors> { diff --git a/hyperplonk/src/structs.rs b/hyperplonk/src/structs.rs index b63c947..8fa10ce 100644 --- a/hyperplonk/src/structs.rs +++ b/hyperplonk/src/structs.rs @@ -5,61 +5,31 @@ use ark_ec::PairingEngine; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; use ark_std::log2; -use pcs::PolynomialCommitmentScheme; -use poly_iop::prelude::{PermutationCheck, ZeroCheck}; use std::rc::Rc; +use subroutines::{ + pcs::PolynomialCommitmentScheme, + poly_iop::prelude::{PermutationCheck, ZeroCheck}, +}; /// The proof for the HyperPlonk PolyIOP, consists of the following: /// - a batch commitment to all the witness MLEs /// - a batch opening to all the MLEs at certain index /// - the zero-check proof for checking custom gate-satisfiability /// - the permutation-check proof for checking the copy constraints -#[derive(Clone, Debug, Default, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct HyperPlonkProof where E: PairingEngine, PC: PermutationCheck, PCS: PolynomialCommitmentScheme, { - // ======================================================================= - // witness related - // ======================================================================= // PCS commit for witnesses - pub w_merged_com: PCS::Commitment, - // Batch opening for witness commitment - // - PermCheck eval: 1 point - // - ZeroCheck evals: #witness points - // - public input eval: 1 point - pub 
w_merged_batch_opening: PCS::BatchProof, - // Evaluations of Witness - // - PermCheck eval: 1 point - // - ZeroCheck evals: #witness points - // - public input eval: 1 point - pub w_merged_batch_evals: Vec, - // ======================================================================= - // prod(x) related - // ======================================================================= - // prod(x)'s openings - // - prod(0, x), - // - prod(1, x), - // - prod(x, 0), - // - prod(x, 1), - // - prod(1, ..., 1,0) - pub prod_batch_openings: PCS::BatchProof, - // prod(x)'s evaluations - // - prod(0, x), - // - prod(1, x), - // - prod(x, 0), - // - prod(x, 1), - // - prod(1, ..., 1,0) - pub prod_batch_evals: Vec, - // ======================================================================= - // selectors related - // ======================================================================= - // PCS openings for selectors on zero check point - pub selector_batch_opening: PCS::BatchProof, - // Evaluates of selectors on zero check point - pub selector_batch_evals: Vec, + pub witness_merged_commit: PCS::Commitment, + pub witness_commits: Vec, + pub batch_prod_x_openings: PCS::BatchProof, + pub batch_witness_and_selector_openings: PCS::BatchProof, + pub perm_check_opening: PCS::Proof, + pub perm_check_eval: PCS::Evaluation, // ======================================================================= // IOP proofs // ======================================================================= @@ -144,7 +114,7 @@ pub struct HyperPlonkProvingKey>>, /// A commitment to the preprocessed selector polynomials - pub selector_com: PCS::Commitment, + pub selector_commitments: Vec, /// The parameters for PCS commitment pub pcs_param: PCS::ProverParam, } @@ -162,7 +132,7 @@ pub struct HyperPlonkVerifyingKey, /// Permutation oracle's commitment pub perm_com: PCS::Commitment, } diff --git a/hyperplonk/src/utils.rs b/hyperplonk/src/utils.rs index a474379..2dcdf73 100644 --- a/hyperplonk/src/utils.rs +++ b/hyperplonk/src/utils.rs @@ -2,83 +2,84 @@ use crate::{ custom_gate::CustomizedGates, errors::HyperPlonkErrors, structs::HyperPlonkParams, witness::WitnessColumn, }; -use arithmetic::VirtualPolynomial; +use arithmetic::{evaluate_opt, VirtualPolynomial}; use ark_ec::PairingEngine; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; -use pcs::PolynomialCommitmentScheme; use std::{borrow::Borrow, rc::Rc}; +use subroutines::pcs::{prelude::Commitment, PolynomialCommitmentScheme}; +use transcript::IOPTranscript; /// An accumulator structure that holds a polynomial and /// its opening points #[derive(Debug)] pub(super) struct PcsAccumulator> { - pub(crate) polynomial: Option, - pub(crate) poly_commit: Option, + // sequence: + // - prod(x) at 5 points + // - w_merged at perm check point + // - w_merged at zero check points (#witness points) + // - selector_merged at zero check points (#selector points) + // - w[0] at r_pi + pub(crate) num_var: usize, + pub(crate) polynomials: Vec, + pub(crate) commitments: Vec, pub(crate) points: Vec, + pub(crate) evals: Vec, } -impl> PcsAccumulator { +impl PcsAccumulator +where + E: PairingEngine, + PCS: PolynomialCommitmentScheme< + E, + Polynomial = Rc>, + Point = Vec, + Evaluation = E::Fr, + Commitment = Commitment, + >, +{ /// Create an empty accumulator. 
- pub(super) fn new() -> Self { + pub(super) fn new(num_var: usize) -> Self { Self { - polynomial: None, - poly_commit: None, + num_var, + polynomials: vec![], + commitments: vec![], points: vec![], + evals: vec![], } } - /// Initialize the polynomial; requires both the polynomial - /// and its commitment. - pub(super) fn init_poly( + /// Push a new evaluation point into the accumulator + pub(super) fn insert_poly_and_points( &mut self, - polynomial: PCS::Polynomial, - commitment: PCS::Commitment, - ) -> Result<(), HyperPlonkErrors> { - if self.polynomial.is_some() || self.poly_commit.is_some() { - return Err(HyperPlonkErrors::InvalidProver( - "poly already set for accumulator".to_string(), - )); - } + poly: &PCS::Polynomial, + commit: &PCS::Commitment, + point: &PCS::Point, + ) { + assert!(poly.num_vars == point.len()); + assert!(poly.num_vars == self.num_var); - self.polynomial = Some(polynomial); - self.poly_commit = Some(commitment); - Ok(()) - } + let eval = evaluate_opt(poly, point); - /// Push a new evaluation point into the accumulator - pub(super) fn insert_point(&mut self, point: &PCS::Point) { - self.points.push(point.clone()) + self.evals.push(eval); + self.polynomials.push(poly.clone()); + self.points.push(point.clone()); + self.commitments.push(*commit); } /// Batch open all the points over a merged polynomial. /// A simple wrapper of PCS::multi_open - pub(super) fn batch_open( + pub(super) fn multi_open( &self, prover_param: impl Borrow, - ) -> Result<(PCS::BatchProof, Vec), HyperPlonkErrors> { - let poly = match &self.polynomial { - Some(p) => p, - None => { - return Err(HyperPlonkErrors::InvalidProver( - "poly is set for accumulator".to_string(), - )) - }, - }; - - let commitment = match &self.poly_commit { - Some(p) => p, - None => { - return Err(HyperPlonkErrors::InvalidProver( - "poly is set for accumulator".to_string(), - )) - }, - }; - Ok(PCS::multi_open_single_poly( + transcript: &mut IOPTranscript, + ) -> Result { + Ok(PCS::multi_open( prover_param.borrow(), - commitment, - poly, - &self.points, + self.polynomials.as_ref(), + self.points.as_ref(), + self.evals.as_ref(), + transcript, )?) 
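+        // All accumulated polynomials, points and evaluations are opened together in this
+        // single call; for the multilinear KZG scheme this dispatches to the sumcheck-based
+        // batch opening added in subroutines/src/pcs/multilinear_kzg/batching.rs.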
} } diff --git a/pcs/Cargo.toml b/pcs/Cargo.toml deleted file mode 100644 index 9e2813d..0000000 --- a/pcs/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pcs" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] - -ark-std = { version = "^0.3.0", default-features = false } -ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] } -ark-ff = { version = "^0.3.0", default-features = false } -ark-ec = { version = "^0.3.0", default-features = false } -ark-poly = {version = "^0.3.0", default-features = false } -ark-sponge = {version = "^0.3.0", default-features = false} -ark-bls12-381 = { version = "0.3.0", default-features = false, features = [ "curve" ] } - -displaydoc = { version = "0.2.3", default-features = false } -derivative = { version = "2", features = ["use_core"] } - -arithmetic = { path = "../arithmetic" } -transcript = { path = "../transcript" } -util = { path = "../util" } - -rayon = { version = "1.5.2", default-features = false, optional = true } -itertools = { version = "0.10.4", optional = true } - -# Benchmarks -[[bench]] -name = "pcs-benches" -path = "benches/bench.rs" -harness = false - -[features] -# default = [ "parallel", "print-trace" ] -default = [ "parallel",] -extensive_sanity_checks = [ ] -parallel = [ - "rayon", - "itertools", - "ark-std/parallel", - "ark-ff/parallel", - "ark-poly/parallel", - "ark-ec/parallel", - "util/parallel", - "arithmetic/parallel", - ] -print-trace = [ - "ark-std/print-trace", - "arithmetic/print-trace", - ] \ No newline at end of file diff --git a/pcs/src/multilinear_kzg/batching/mod.rs b/pcs/src/multilinear_kzg/batching/mod.rs deleted file mode 100644 index 08a5070..0000000 --- a/pcs/src/multilinear_kzg/batching/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2022 Espresso Systems (espressosys.com) -// This file is part of the Jellyfish library. - -// You should have received a copy of the MIT License -// along with the Jellyfish library. If not, see . - -mod multi_poly; -mod single_poly; - -pub(crate) use multi_poly::*; -pub(crate) use single_poly::*; diff --git a/pcs/src/multilinear_kzg/batching/multi_poly.rs b/pcs/src/multilinear_kzg/batching/multi_poly.rs deleted file mode 100644 index b3fd1bb..0000000 --- a/pcs/src/multilinear_kzg/batching/multi_poly.rs +++ /dev/null @@ -1,426 +0,0 @@ -use crate::{ - multilinear_kzg::{ - open_internal, - srs::{MultilinearProverParam, MultilinearVerifierParam}, - util::compute_w_circ_l, - verify_internal, MultilinearKzgBatchProof, - }, - prelude::{Commitment, UnivariateProverParam, UnivariateVerifierParam}, - univariate_kzg::UnivariateKzgPCS, - PCSError, PolynomialCommitmentScheme, -}; -use arithmetic::{build_l, get_uni_domain, merge_polynomials}; -use ark_ec::PairingEngine; -use ark_poly::{DenseMultilinearExtension, EvaluationDomain, MultilinearExtension, Polynomial}; -use ark_std::{end_timer, format, rc::Rc, start_timer, string::ToString, vec, vec::Vec}; -use transcript::IOPTranscript; - -/// Input -/// - the prover parameters for univariate KZG, -/// - the prover parameters for multilinear KZG, -/// - a list of MLEs, -/// - a commitment to all MLEs -/// - and a same number of points, -/// compute a multi-opening for all the polynomials. -/// -/// For simplicity, this API requires each MLE to have only one point. 
If -/// the caller wish to use more than one points per MLE, it should be -/// handled at the caller layer, and utilize 'multi_open_same_poly_internal' -/// API. -/// -/// Returns an error if the lengths do not match. -/// -/// Returns the proof, consists of -/// - the multilinear KZG opening -/// - the univariate KZG commitment to q(x) -/// - the openings and evaluations of q(x) at omega^i and r -/// -/// Steps: -/// 1. build `l(points)` which is a list of univariate polynomials that goes -/// through the points -/// 2. build MLE `w` which is the merge of all MLEs. -/// 3. build `q(x)` which is a univariate polynomial `W circ l` -/// 4. commit to q(x) and sample r from transcript -/// transcript contains: w commitment, points, q(x)'s commitment -/// 5. build q(omega^i) and their openings -/// 6. build q(r) and its opening -/// 7. get a point `p := l(r)` -/// 8. output an opening of `w` over point `p` -/// 9. output `w(p)` -pub(crate) fn multi_open_internal( - uni_prover_param: &UnivariateProverParam, - ml_prover_param: &MultilinearProverParam, - polynomials: &[Rc>], - multi_commitment: &Commitment, - points: &[Vec], -) -> Result<(MultilinearKzgBatchProof, Vec), PCSError> { - let open_timer = start_timer!(|| "multi open"); - - // =================================== - // Sanity checks on inputs - // =================================== - let points_len = points.len(); - if points_len == 0 { - return Err(PCSError::InvalidParameters("points is empty".to_string())); - } - - if points_len != polynomials.len() { - return Err(PCSError::InvalidParameters( - "polynomial length does not match point length".to_string(), - )); - } - - let num_var = polynomials[0].num_vars(); - for poly in polynomials.iter().skip(1) { - if poly.num_vars() != num_var { - return Err(PCSError::InvalidParameters( - "polynomials do not have same num_vars".to_string(), - )); - } - } - for point in points.iter() { - if point.len() != num_var { - return Err(PCSError::InvalidParameters( - "points do not have same num_vars".to_string(), - )); - } - } - - let domain = get_uni_domain::(points_len)?; - - // 1. build `l(points)` which is a list of univariate polynomials that goes - // through the points - let uni_polys = build_l(points, &domain, true)?; - - // 2. build MLE `w` which is the merge of all MLEs. - let merge_poly = merge_polynomials(polynomials)?; - - // 3. build `q(x)` which is a univariate polynomial `W circ l` - let q_x = compute_w_circ_l(&merge_poly, &uni_polys, points.len(), true)?; - - // 4. commit to q(x) and sample r from transcript - // transcript contains: w commitment, points, q(x)'s commitment - let mut transcript = IOPTranscript::new(b"ml kzg"); - transcript.append_serializable_element(b"w", multi_commitment)?; - for point in points { - transcript.append_serializable_element(b"w", point)?; - } - - let q_x_commit = UnivariateKzgPCS::::commit(uni_prover_param, &q_x)?; - transcript.append_serializable_element(b"q(x)", &q_x_commit)?; - let r = transcript.get_and_append_challenge(b"r")?; - // 5. 
build q(omega^i) and their openings - let mut q_x_opens = vec![]; - let mut q_x_evals = vec![]; - for i in 0..points_len { - let (q_x_open, q_x_eval) = - UnivariateKzgPCS::::open(uni_prover_param, &q_x, &domain.element(i))?; - q_x_opens.push(q_x_open); - q_x_evals.push(q_x_eval); - #[cfg(feature = "extensive_sanity_checks")] - { - // sanity check - let point: Vec = uni_polys - .iter() - .map(|poly| poly.evaluate(&domain.element(i))) - .collect(); - let mle_eval = merge_poly.evaluate(&point).unwrap(); - if mle_eval != q_x_eval { - return Err(PCSError::InvalidProver( - "Q(omega) does not match W(l(omega))".to_string(), - )); - } - } - } - - // 6. build q(r) and its opening - let (q_x_open, q_r_value) = UnivariateKzgPCS::::open(uni_prover_param, &q_x, &r)?; - q_x_opens.push(q_x_open); - q_x_evals.push(q_r_value); - - // 7. get a point `p := l(r)` - let point: Vec = uni_polys.iter().map(|poly| poly.evaluate(&r)).collect(); - // 8. output an opening of `w` over point `p` - let (mle_opening, mle_eval) = open_internal(ml_prover_param, &merge_poly, &point)?; - - // 9. output value that is `w` evaluated at `p` (which should match `q(r)`) - if mle_eval != q_r_value { - return Err(PCSError::InvalidProver( - "Q(r) does not match W(l(r))".to_string(), - )); - } - end_timer!(open_timer); - Ok(( - MultilinearKzgBatchProof { - proof: mle_opening, - q_x_commit, - q_x_opens, - }, - q_x_evals, - )) -} - -/// Verifies that the `multi_commitment` is a valid commitment -/// to a list of MLEs for the given openings and evaluations in -/// the batch_proof. -/// -/// steps: -/// -/// 1. push w, points and q_com into transcript -/// 2. sample `r` from transcript -/// 3. check `q(r) == batch_proof.q_x_value.last` and -/// `q(omega^i) == batch_proof.q_x_value[i]` -/// 4. build `l(points)` which is a list of univariate -/// polynomials that goes through the points -/// 5. get a point `p := l(r)` -/// 6. verifies `p` is valid against multilinear KZG proof -pub(crate) fn batch_verify_internal( - uni_verifier_param: &UnivariateVerifierParam, - ml_verifier_param: &MultilinearVerifierParam, - multi_commitment: &Commitment, - points: &[Vec], - values: &[E::Fr], - batch_proof: &MultilinearKzgBatchProof, -) -> Result { - let verify_timer = start_timer!(|| "batch verify"); - - // =================================== - // Sanity checks on inputs - // =================================== - let points_len = points.len(); - if points_len == 0 { - return Err(PCSError::InvalidParameters("points is empty".to_string())); - } - - // add one here because we also have q(r) and its opening - if points_len + 1 != batch_proof.q_x_opens.len() { - return Err(PCSError::InvalidParameters(format!( - "openings length {} does not match point length {}", - points_len + 1, - batch_proof.q_x_opens.len() - ))); - } - - if points_len + 1 != values.len() { - return Err(PCSError::InvalidParameters(format!( - "values length {} does not match point length {}", - points_len + 1, - values.len() - ))); - } - - let num_var = points[0].len(); - for point in points.iter().skip(1) { - if point.len() != num_var { - return Err(PCSError::InvalidParameters(format!( - "points do not have same num_vars ({} vs {})", - point.len(), - num_var, - ))); - } - } - - let domain = get_uni_domain::(points_len)?; - // 1. 
push w, points and q_com into transcript - let mut transcript = IOPTranscript::new(b"ml kzg"); - transcript.append_serializable_element(b"w", multi_commitment)?; - - for point in points { - transcript.append_serializable_element(b"w", point)?; - } - - transcript.append_serializable_element(b"q(x)", &batch_proof.q_x_commit)?; - // 2. sample `r` from transcript - let r = transcript.get_and_append_challenge(b"r")?; - // 3. check `q(r) == batch_proof.q_x_value.last` and `q(omega^i) = - // batch_proof.q_x_value[i]` - for (i, value) in values.iter().enumerate().take(points_len) { - if !UnivariateKzgPCS::verify( - uni_verifier_param, - &batch_proof.q_x_commit, - &domain.element(i), - value, - &batch_proof.q_x_opens[i], - )? { - #[cfg(debug_assertion)] - println!("q(omega^{}) verification failed", i); - return Ok(false); - } - } - - if !UnivariateKzgPCS::verify( - uni_verifier_param, - &batch_proof.q_x_commit, - &r, - &values[points_len], - &batch_proof.q_x_opens[points_len], - )? { - #[cfg(debug_assertion)] - println!("q(r) verification failed"); - return Ok(false); - } - // 4. build `l(points)` which is a list of univariate polynomials that goes - // through the points - let uni_polys = build_l(points, &domain, true)?; - - // 5. get a point `p := l(r)` - let point: Vec = uni_polys.iter().map(|x| x.evaluate(&r)).collect(); - // 6. verifies `p` is valid against multilinear KZG proof - let res = verify_internal( - ml_verifier_param, - multi_commitment, - &point, - &values[points_len], - &batch_proof.proof, - )?; - #[cfg(debug_assertion)] - if !res { - println!("multilinear KZG verification failed"); - } - - end_timer!(verify_timer); - - Ok(res) -} -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - multilinear_kzg::{ - srs::MultilinearUniversalParams, - util::{compute_qx_degree, generate_evaluations_multi_poly}, - MultilinearKzgPCS, MultilinearKzgProof, - }, - prelude::UnivariateUniversalParams, - StructuredReferenceString, - }; - use arithmetic::get_batched_nv; - use ark_bls12_381::Bls12_381 as E; - use ark_ec::PairingEngine; - use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; - use ark_std::{ - log2, - rand::{CryptoRng, RngCore}, - test_rng, - vec::Vec, - UniformRand, - }; - - type Fr = ::Fr; - - fn test_multi_open_helper( - uni_params: &UnivariateUniversalParams, - ml_params: &MultilinearUniversalParams, - polys: &[Rc>], - rng: &mut R, - ) -> Result<(), PCSError> { - let merged_nv = get_batched_nv(polys[0].num_vars(), polys.len()); - let qx_degree = compute_qx_degree(merged_nv, polys.len()); - let padded_qx_degree = 1usize << log2(qx_degree); - - let (uni_ck, uni_vk) = uni_params.trim(padded_qx_degree)?; - let (ml_ck, ml_vk) = ml_params.trim(merged_nv)?; - - let mut points = Vec::new(); - for poly in polys.iter() { - let point = (0..poly.num_vars()) - .map(|_| Fr::rand(rng)) - .collect::>(); - points.push(point); - } - - let evals = generate_evaluations_multi_poly(polys, &points)?; - - let com = MultilinearKzgPCS::multi_commit(&(ml_ck.clone(), uni_ck.clone()), polys)?; - let (batch_proof, evaluations) = - multi_open_internal(&uni_ck, &ml_ck, polys, &com, &points)?; - - for (a, b) in evals.iter().zip(evaluations.iter()) { - assert_eq!(a, b) - } - - // good path - assert!(batch_verify_internal( - &uni_vk, - &ml_vk, - &com, - &points, - &evaluations, - &batch_proof, - )?); - - // bad commitment - assert!(!batch_verify_internal( - &uni_vk, - &ml_vk, - &Commitment(::G1Affine::default()), - &points, - &evaluations, - &batch_proof, - )?); - - // bad points - assert!( - 
batch_verify_internal(&uni_vk, &ml_vk, &com, &points[1..], &[], &batch_proof,).is_err() - ); - - // bad proof - assert!(batch_verify_internal( - &uni_vk, - &ml_vk, - &com, - &points, - &evaluations, - &MultilinearKzgBatchProof { - proof: MultilinearKzgProof { proofs: Vec::new() }, - q_x_commit: Commitment(::G1Affine::default()), - q_x_opens: vec![], - }, - ) - .is_err()); - - // bad value - let mut wrong_evals = evaluations.clone(); - wrong_evals[0] = Fr::default(); - assert!(!batch_verify_internal( - &uni_vk, - &ml_vk, - &com, - &points, - &wrong_evals, - &batch_proof - )?); - - // bad q(x) commit - let mut wrong_proof = batch_proof; - wrong_proof.q_x_commit = Commitment(::G1Affine::default()); - assert!(!batch_verify_internal( - &uni_vk, - &ml_vk, - &com, - &points, - &evaluations, - &wrong_proof, - )?); - Ok(()) - } - - #[test] - fn test_multi_open_internal() -> Result<(), PCSError> { - let mut rng = test_rng(); - - let uni_params = - UnivariateUniversalParams::::gen_srs_for_testing(&mut rng, 1usize << 15)?; - let ml_params = MultilinearUniversalParams::::gen_srs_for_testing(&mut rng, 15)?; - for num_poly in 1..10 { - for nv in 1..5 { - let polys1: Vec<_> = (0..num_poly) - .map(|_| Rc::new(DenseMultilinearExtension::rand(nv, &mut rng))) - .collect(); - test_multi_open_helper(&uni_params, &ml_params, &polys1, &mut rng)?; - } - } - - Ok(()) - } -} diff --git a/pcs/src/multilinear_kzg/batching/single_poly.rs b/pcs/src/multilinear_kzg/batching/single_poly.rs deleted file mode 100644 index 05f071c..0000000 --- a/pcs/src/multilinear_kzg/batching/single_poly.rs +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (c) 2022 Espresso Systems (espressosys.com) -// This file is part of the Jellyfish library. - -// You should have received a copy of the MIT License -// along with the Jellyfish library. If not, see . - -use crate::{ - multilinear_kzg::{ - open_internal, - srs::{MultilinearProverParam, MultilinearVerifierParam}, - util::compute_w_circ_l, - verify_internal, MultilinearKzgBatchProof, - }, - prelude::{Commitment, UnivariateProverParam, UnivariateVerifierParam}, - univariate_kzg::UnivariateKzgPCS, - PCSError, PolynomialCommitmentScheme, -}; -use arithmetic::{build_l, get_uni_domain}; -use ark_ec::PairingEngine; -use ark_poly::{DenseMultilinearExtension, EvaluationDomain, MultilinearExtension, Polynomial}; -use ark_std::{end_timer, format, rc::Rc, start_timer, string::ToString, vec, vec::Vec}; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; -use transcript::IOPTranscript; - -/// Input -/// - the prover parameters for univariate KZG, -/// - the prover parameters for multilinear KZG, -/// - a single MLE, -/// - a commitment to the MLE -/// - and a list of points, -/// compute a multi-opening for this polynomial. -/// -/// For simplicity, this API requires each MLE to have only one point. If -/// the caller wish to use more than one points per MLE, it should be -/// handled at the caller layer. -/// -/// -/// Returns the proof, consists of -/// - the multilinear KZG opening -/// - the univariate KZG commitment to q(x) -/// - the openings and evaluations of q(x) at omega^i and r -/// -/// Steps: -/// 1. build `l(points)` which is a list of univariate polynomials that goes -/// through the points -/// 3. build `q(x)` which is a univariate polynomial `W circ l` -/// 4. commit to q(x) and sample r from transcript -/// transcript contains: w commitment, points, q(x)'s commitment -/// 5. build q(omega^i) and their openings -/// 6. build q(r) and its opening -/// 7. 
get a point `p := l(r)` -/// 8. output an opening of `w` over point `p` -/// 9. output `w(p)` -pub(crate) fn multi_open_same_poly_internal( - uni_prover_param: &UnivariateProverParam, - ml_prover_param: &MultilinearProverParam, - polynomial: &Rc>, - commitment: &Commitment, - points: &[Vec], -) -> Result<(MultilinearKzgBatchProof, Vec), PCSError> { - let open_timer = start_timer!(|| "multi open"); - - // =================================== - // Sanity checks on inputs - // =================================== - let points_len = points.len(); - if points_len == 0 { - return Err(PCSError::InvalidParameters("points is empty".to_string())); - } - - let num_var = polynomial.num_vars(); - for point in points.iter() { - if point.len() != num_var { - return Err(PCSError::InvalidParameters( - "points do not have same num_vars".to_string(), - )); - } - } - - let domain = get_uni_domain::(points_len)?; - - // 1. build `l(points)` which is a list of univariate polynomials that goes - // through the points - let uni_polys = build_l(points, &domain, false)?; - - // 3. build `q(x)` which is a univariate polynomial `W circ l` - let q_x = compute_w_circ_l(polynomial, &uni_polys, points.len(), false)?; - - // 4. commit to q(x) and sample r from transcript - // transcript contains: w commitment, points, q(x)'s commitment - let mut transcript = IOPTranscript::new(b"ml kzg"); - transcript.append_serializable_element(b"w", commitment)?; - for point in points { - transcript.append_serializable_element(b"w", point)?; - } - - let q_x_commit = UnivariateKzgPCS::::commit(uni_prover_param, &q_x)?; - transcript.append_serializable_element(b"q(x)", &q_x_commit)?; - let r = transcript.get_and_append_challenge(b"r")?; - // 5. build q(omega^i) and their openings - let mut q_x_opens = vec![]; - let mut q_x_evals = vec![]; - for i in 0..points_len { - let (q_x_open, q_x_eval) = - UnivariateKzgPCS::::open(uni_prover_param, &q_x, &domain.element(i))?; - q_x_opens.push(q_x_open); - q_x_evals.push(q_x_eval); - - #[cfg(feature = "extensive_sanity_checks")] - { - // sanity check - let point: Vec = uni_polys - .iter() - .map(|poly| poly.evaluate(&domain.element(i))) - .collect(); - let mle_eval = polynomial.evaluate(&point).unwrap(); - if mle_eval != q_x_eval { - return Err(PCSError::InvalidProver( - "Q(omega) does not match W(l(omega))".to_string(), - )); - } - } - } - - // 6. build q(r) and its opening - let (q_x_open, q_r_value) = UnivariateKzgPCS::::open(uni_prover_param, &q_x, &r)?; - q_x_opens.push(q_x_open); - q_x_evals.push(q_r_value); - - // 7. get a point `p := l(r)` - let point: Vec = uni_polys - .into_par_iter() - .map(|poly| poly.evaluate(&r)) - .collect(); - // 8. output an opening of `w` over point `p` - let (mle_opening, mle_eval) = open_internal(ml_prover_param, polynomial, &point)?; - - // 9. output value that is `w` evaluated at `p` (which should match `q(r)`) - if mle_eval != q_r_value { - return Err(PCSError::InvalidProver( - "Q(r) does not match W(l(r))".to_string(), - )); - } - end_timer!(open_timer); - Ok(( - MultilinearKzgBatchProof { - proof: mle_opening, - q_x_commit, - q_x_opens, - }, - q_x_evals, - )) -} - -/// Verifies that the `multi_commitment` is a valid commitment -/// to a list of MLEs for the given openings and evaluations in -/// the batch_proof. -/// -/// steps: -/// -/// 1. push w, points and q_com into transcript -/// 2. sample `r` from transcript -/// 3. check `q(r) == batch_proof.q_x_value.last` and -/// `q(omega^i) == batch_proof.q_x_value[i]` -/// 4. 
build `l(points)` which is a list of univariate -/// polynomials that goes through the points -/// 5. get a point `p := l(r)` -/// 6. verifies `p` is valid against multilinear KZG proof -#[allow(dead_code)] -pub(crate) fn batch_verify_same_poly_internal( - uni_verifier_param: &UnivariateVerifierParam, - ml_verifier_param: &MultilinearVerifierParam, - multi_commitment: &Commitment, - points: &[Vec], - values: &[E::Fr], - batch_proof: &MultilinearKzgBatchProof, -) -> Result { - let verify_timer = start_timer!(|| "batch verify"); - - // =================================== - // Sanity checks on inputs - // =================================== - let points_len = points.len(); - if points_len == 0 { - return Err(PCSError::InvalidParameters("points is empty".to_string())); - } - - // add one here because we also have q(r) and its opening - if points_len + 1 != batch_proof.q_x_opens.len() { - return Err(PCSError::InvalidParameters(format!( - "openings length {} does not match point length {}", - points_len + 1, - batch_proof.q_x_opens.len() - ))); - } - - if points_len + 1 != values.len() { - return Err(PCSError::InvalidParameters(format!( - "values length {} does not match point length {}", - points_len + 1, - values.len() - ))); - } - - let num_var = points[0].len(); - for point in points.iter().skip(1) { - if point.len() != num_var { - return Err(PCSError::InvalidParameters(format!( - "points do not have same num_vars ({} vs {})", - point.len(), - num_var, - ))); - } - } - - let domain = get_uni_domain::(points_len)?; - // 1. push w, points and q_com into transcript - let mut transcript = IOPTranscript::new(b"ml kzg"); - transcript.append_serializable_element(b"w", multi_commitment)?; - - for point in points { - transcript.append_serializable_element(b"w", point)?; - } - - transcript.append_serializable_element(b"q(x)", &batch_proof.q_x_commit)?; - // 2. sample `r` from transcript - let r = transcript.get_and_append_challenge(b"r")?; - // 3. check `q(r) == batch_proof.q_x_value.last` and `q(omega^i) = - // batch_proof.q_x_value[i]` - for (i, value) in values.iter().enumerate().take(points_len) { - if !UnivariateKzgPCS::verify( - uni_verifier_param, - &batch_proof.q_x_commit, - &domain.element(i), - value, - &batch_proof.q_x_opens[i], - )? { - #[cfg(debug_assertion)] - println!("q(omega^{}) verification failed", i); - return Ok(false); - } - } - - if !UnivariateKzgPCS::verify( - uni_verifier_param, - &batch_proof.q_x_commit, - &r, - &values[points_len], - &batch_proof.q_x_opens[points_len], - )? { - #[cfg(debug_assertion)] - println!("q(r) verification failed"); - return Ok(false); - } - // 4. build `l(points)` which is a list of univariate polynomials that goes - // through the points - let uni_polys = build_l(points, &domain, false)?; - - // 5. get a point `p := l(r)` - let point: Vec = uni_polys.iter().map(|x| x.evaluate(&r)).collect(); - // 6. 
verifies `p` is valid against multilinear KZG proof - let res = verify_internal( - ml_verifier_param, - multi_commitment, - &point, - &values[points_len], - &batch_proof.proof, - )?; - #[cfg(debug_assertion)] - if !res { - println!("multilinear KZG verification failed"); - } - - end_timer!(verify_timer); - - Ok(res) -} -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - multilinear_kzg::{ - srs::MultilinearUniversalParams, - util::{compute_qx_degree, generate_evaluations_single_poly}, - MultilinearKzgPCS, - }, - prelude::UnivariateUniversalParams, - StructuredReferenceString, - }; - use ark_bls12_381::Bls12_381 as E; - use ark_ec::PairingEngine; - use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; - use ark_std::{ - log2, - rand::{CryptoRng, RngCore}, - test_rng, - vec::Vec, - UniformRand, - }; - type Fr = ::Fr; - - fn test_same_poly_multi_open_internal_helper( - uni_params: &UnivariateUniversalParams, - ml_params: &MultilinearUniversalParams, - poly: &Rc>, - point_len: usize, - rng: &mut R, - ) -> Result<(), PCSError> { - let nv = poly.num_vars; - let qx_degree = compute_qx_degree(nv, point_len); - let padded_qx_degree = 1usize << log2(qx_degree); - - let (uni_ck, uni_vk) = uni_params.trim(padded_qx_degree)?; - let (ml_ck, ml_vk) = ml_params.trim(nv)?; - - let mut points = Vec::new(); - let mut eval = Vec::new(); - for _ in 0..point_len { - let point = (0..nv).map(|_| Fr::rand(rng)).collect::>(); - eval.push(poly.evaluate(&point).unwrap()); - points.push(point); - } - - let evals = generate_evaluations_single_poly(poly, &points)?; - let com = MultilinearKzgPCS::commit(&(ml_ck.clone(), uni_ck.clone()), poly)?; - let (batch_proof, evaluations) = - multi_open_same_poly_internal(&uni_ck, &ml_ck, poly, &com, &points)?; - - for (a, b) in evals.iter().zip(evaluations.iter()) { - assert_eq!(a, b) - } - - // good path - assert!(batch_verify_same_poly_internal( - &uni_vk, - &ml_vk, - &com, - &points, - &evaluations, - &batch_proof, - )?); - - Ok(()) - } - - #[test] - fn test_same_poly_multi_open_internal() -> Result<(), PCSError> { - let mut rng = test_rng(); - - let uni_params = - UnivariateUniversalParams::::gen_srs_for_testing(&mut rng, 1usize << 15)?; - let ml_params = MultilinearUniversalParams::::gen_srs_for_testing(&mut rng, 15)?; - for nv in 1..10 { - for point_len in 1..10 { - // normal polynomials - let polys1 = Rc::new(DenseMultilinearExtension::rand(nv, &mut rng)); - test_same_poly_multi_open_internal_helper( - &uni_params, - &ml_params, - &polys1, - point_len, - &mut rng, - )?; - } - } - Ok(()) - } -} diff --git a/pcs/src/multilinear_kzg/util.rs b/pcs/src/multilinear_kzg/util.rs deleted file mode 100644 index a475b3f..0000000 --- a/pcs/src/multilinear_kzg/util.rs +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright (c) 2022 Espresso Systems (espressosys.com) -// This file is part of the Jellyfish library. - -// You should have received a copy of the MIT License -// along with the Jellyfish library. If not, see . - -//! 
Useful utilities for KZG PCS -use crate::prelude::PCSError; -use arithmetic::evaluate_no_par; -use ark_ff::PrimeField; -use ark_poly::{ - univariate::DensePolynomial, DenseMultilinearExtension, EvaluationDomain, Evaluations, - MultilinearExtension, Polynomial, Radix2EvaluationDomain, -}; -use ark_std::{end_timer, format, log2, start_timer, string::ToString, vec::Vec}; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; - -/// For an MLE w with `mle_num_vars` variables, and `point_len` number of -/// points, compute the degree of the univariate polynomial `q(x):= w(l(x))` -/// where l(x) is a list of polynomials that go through all points. -// uni_degree is computed as `mle_num_vars * point_len`: -// - each l(x) is of degree `point_len` -// - mle has degree one -// - worst case is `\prod_{i=0}^{mle_num_vars-1} l_i(x) < point_len * mle_num_vars` -#[inline] -pub fn compute_qx_degree(mle_num_vars: usize, point_len: usize) -> usize { - mle_num_vars * ((1 << log2(point_len)) - 1) + 1 -} - -/// Compute W \circ l. -/// -/// Given an MLE W, and a list of univariate polynomials l, generate the -/// univariate polynomial that composes W with l. -/// -/// Returns an error if l's length does not matches number of variables in W. -pub(crate) fn compute_w_circ_l( - w: &DenseMultilinearExtension, - l: &[DensePolynomial], - num_points: usize, - with_suffix: bool, -) -> Result, PCSError> { - let timer = start_timer!(|| "compute W \\circ l"); - - if w.num_vars != l.len() { - return Err(PCSError::InvalidParameters(format!( - "l's length ({}) does not match num_variables ({})", - l.len(), - w.num_vars(), - ))); - } - let uni_degree = if with_suffix { - compute_qx_degree(w.num_vars() + log2(num_points) as usize, num_points) - } else { - compute_qx_degree(w.num_vars(), num_points) - }; - - let domain = match Radix2EvaluationDomain::::new(uni_degree) { - Some(p) => p, - None => { - return Err(PCSError::InvalidParameters( - "failed to build radix 2 domain".to_string(), - )) - }, - }; - - let step = start_timer!(|| format!("compute eval {}-dim domain", domain.size())); - let res_eval = (0..domain.size()) - .into_par_iter() - .map(|i| { - let l_eval: Vec = l.iter().map(|x| x.evaluate(&domain.element(i))).collect(); - evaluate_no_par(w, &l_eval) - }) - .collect(); - end_timer!(step); - - let evaluation = Evaluations::from_vec_and_domain(res_eval, domain); - let res = evaluation.interpolate(); - - end_timer!(timer); - Ok(res) -} - -/// Input a list of multilinear polynomials and a list of points, -/// generate a list of evaluations. -// Note that this function is only used for testing verifications. -// In practice verifier does not see polynomials, and the `mle_values` -// are included in the `batch_proof`. 
-#[cfg(test)] -pub(crate) fn generate_evaluations_multi_poly( - polynomials: &[std::rc::Rc>], - points: &[Vec], -) -> Result, PCSError> { - use arithmetic::{build_l, get_uni_domain, merge_polynomials}; - - if polynomials.len() != points.len() { - return Err(PCSError::InvalidParameters( - "polynomial length does not match point length".to_string(), - )); - } - let uni_poly_degree = points.len(); - let merge_poly = merge_polynomials(polynomials)?; - - let domain = get_uni_domain::(uni_poly_degree)?; - let uni_polys = build_l(points, &domain, true)?; - let mut mle_values = vec![]; - - for i in 0..uni_poly_degree { - let point: Vec = uni_polys - .iter() - .map(|poly| poly.evaluate(&domain.element(i))) - .collect(); - - let mle_value = merge_poly.evaluate(&point).unwrap(); - mle_values.push(mle_value) - } - Ok(mle_values) -} - -/// Input a list of multilinear polynomials and a list of points, -/// generate a list of evaluations. -// Note that this function is only used for testing verifications. -// In practice verifier does not see polynomials, and the `mle_values` -// are included in the `batch_proof`. -#[cfg(test)] -pub(crate) fn generate_evaluations_single_poly( - polynomial: &std::rc::Rc>, - points: &[Vec], -) -> Result, PCSError> { - use arithmetic::{build_l, get_uni_domain}; - - let uni_poly_degree = points.len(); - - let domain = get_uni_domain::(uni_poly_degree)?; - let uni_polys = build_l(points, &domain, false)?; - let mut mle_values = vec![]; - - for i in 0..uni_poly_degree { - let point: Vec = uni_polys - .iter() - .map(|poly| poly.evaluate(&domain.element(i))) - .collect(); - - let mle_value = polynomial.evaluate(&point).unwrap(); - mle_values.push(mle_value) - } - Ok(mle_values) -} - -#[cfg(test)] -mod test { - use super::*; - use arithmetic::{build_l, get_uni_domain, merge_polynomials}; - use ark_bls12_381::Fr; - use ark_poly::UVPolynomial; - use ark_std::{One, Zero}; - use std::rc::Rc; - - #[test] - fn test_w_circ_l() -> Result<(), PCSError> { - test_w_circ_l_helper::() - } - - fn test_w_circ_l_helper() -> Result<(), PCSError> { - { - // Example from page 53: - // W = 3x1x2 + 2x2 whose evaluations are - // 0, 0 |-> 0 - // 1, 0 |-> 0 - // 0, 1 |-> 2 - // 1, 1 |-> 5 - let w_eval = vec![F::zero(), F::zero(), F::from(2u64), F::from(5u64)]; - let w = DenseMultilinearExtension::from_evaluations_vec(2, w_eval); - - // l0 = t + 2 - // l1 = -2t + 4 - let l0 = DensePolynomial::from_coefficients_vec(vec![F::from(2u64), F::one()]); - let l1 = DensePolynomial::from_coefficients_vec(vec![F::from(4u64), -F::from(2u64)]); - - // res = -6t^2 - 4t + 32 - let res = compute_w_circ_l(&w, [l0, l1].as_ref(), 4, false)?; - let res_rec = DensePolynomial::from_coefficients_vec(vec![ - F::from(32u64), - -F::from(4u64), - -F::from(6u64), - ]); - assert_eq!(res, res_rec); - } - { - // A random example - // W = x1x2x3 - 2x1x2 + 3x2x3 - 4x1x3 + 5x1 - 6x2 + 7x3 - // 0, 0, 0 |-> 0 - // 1, 0, 0 |-> 5 - // 0, 1, 0 |-> -6 - // 1, 1, 0 |-> -3 - // 0, 0, 1 |-> 7 - // 1, 0, 1 |-> 8 - // 0, 1, 1 |-> 4 - // 1, 1, 1 |-> 4 - let w_eval = vec![ - F::zero(), - F::from(5u64), - -F::from(6u64), - -F::from(3u64), - F::from(7u64), - F::from(8u64), - F::from(4u64), - F::from(4u64), - ]; - let w = DenseMultilinearExtension::from_evaluations_vec(3, w_eval); - - // l0 = t + 2 - // l1 = 3t - 4 - // l2 = -5t + 6 - let l0 = DensePolynomial::from_coefficients_vec(vec![F::from(2u64), F::one()]); - let l1 = DensePolynomial::from_coefficients_vec(vec![-F::from(4u64), F::from(3u64)]); - let l2 = 
DensePolynomial::from_coefficients_vec(vec![F::from(6u64), -F::from(5u64)]); - let res = compute_w_circ_l(&w, [l0, l1, l2].as_ref(), 8, false)?; - - // res = -15t^3 - 23t^2 + 130t - 76 - let res_rec = DensePolynomial::from_coefficients_vec(vec![ - -F::from(76u64), - F::from(130u64), - -F::from(23u64), - -F::from(15u64), - ]); - - assert_eq!(res, res_rec); - } - Ok(()) - } - - #[test] - fn test_w_circ_l_with_prefix() -> Result<(), PCSError> { - test_w_circ_l_with_prefix_helper::() - } - - fn test_w_circ_l_with_prefix_helper() -> Result<(), PCSError> { - { - // Example from page 53: - // W = 3x1x2 + 2x2 whose evaluations are - // 0, 0 |-> 0 - // 1, 0 |-> 0 - // 0, 1 |-> 2 - // 1, 1 |-> 5 - let w_eval = vec![F::zero(), F::zero(), F::from(2u64), F::from(5u64)]; - let w = DenseMultilinearExtension::from_evaluations_vec(2, w_eval); - - // l0 = t + 2 - // l1 = -2t + 4 - let l0 = DensePolynomial::from_coefficients_vec(vec![F::from(2u64), F::one()]); - let l1 = DensePolynomial::from_coefficients_vec(vec![F::from(4u64), -F::from(2u64)]); - - // res = -6t^2 - 4t + 32 - let res = compute_w_circ_l(&w, [l0, l1].as_ref(), 4, true)?; - let res_rec = DensePolynomial::from_coefficients_vec(vec![ - F::from(32u64), - -F::from(4u64), - -F::from(6u64), - ]); - assert_eq!(res, res_rec); - } - { - // A random example - // W = x1x2x3 - 2x1x2 + 3x2x3 - 4x1x3 + 5x1 - 6x2 + 7x3 - // 0, 0, 0 |-> 0 - // 1, 0, 0 |-> 5 - // 0, 1, 0 |-> -6 - // 1, 1, 0 |-> -3 - // 0, 0, 1 |-> 7 - // 1, 0, 1 |-> 8 - // 0, 1, 1 |-> 4 - // 1, 1, 1 |-> 4 - let w_eval = vec![ - F::zero(), - F::from(5u64), - -F::from(6u64), - -F::from(3u64), - F::from(7u64), - F::from(8u64), - F::from(4u64), - F::from(4u64), - ]; - let w = DenseMultilinearExtension::from_evaluations_vec(3, w_eval); - - // l0 = t + 2 - // l1 = 3t - 4 - // l2 = -5t + 6 - let l0 = DensePolynomial::from_coefficients_vec(vec![F::from(2u64), F::one()]); - let l1 = DensePolynomial::from_coefficients_vec(vec![-F::from(4u64), F::from(3u64)]); - let l2 = DensePolynomial::from_coefficients_vec(vec![F::from(6u64), -F::from(5u64)]); - let res = compute_w_circ_l(&w, [l0, l1, l2].as_ref(), 8, true)?; - - // res = -15t^3 - 23t^2 + 130t - 76 - let res_rec = DensePolynomial::from_coefficients_vec(vec![ - -F::from(76u64), - F::from(130u64), - -F::from(23u64), - -F::from(15u64), - ]); - - assert_eq!(res, res_rec); - } - Ok(()) - } - - #[test] - fn test_qx() -> Result<(), PCSError> { - // Example from page 53: - // W1 = 3x1x2 + 2x2 - let w_eval = vec![Fr::zero(), Fr::from(2u64), Fr::zero(), Fr::from(5u64)]; - let w = Rc::new(DenseMultilinearExtension::from_evaluations_vec(2, w_eval)); - - let r = Fr::from(42u64); - - // point 1 is [1, 2] - let point1 = vec![Fr::from(1u64), Fr::from(2u64)]; - - // point 2 is [3, 4] - let point2 = vec![Fr::from(3u64), Fr::from(4u64)]; - - // point 3 is [5, 6] - let point3 = vec![Fr::from(5u64), Fr::from(6u64)]; - - { - let domain = get_uni_domain::(2)?; - let l = build_l(&[point1.clone(), point2.clone()], &domain, false)?; - - let q_x = compute_w_circ_l(&w, &l, 2, false)?; - - let point: Vec = l.iter().map(|poly| poly.evaluate(&r)).collect(); - - assert_eq!( - q_x.evaluate(&r), - w.evaluate(&point).unwrap(), - "q(r) != w(l(r))" - ); - } - - { - let domain = get_uni_domain::(3)?; - - let l = build_l(&[point1, point2, point3], &domain, false)?; - let q_x = compute_w_circ_l(&w, &l, 3, false)?; - - let point: Vec = vec![l[0].evaluate(&r), l[1].evaluate(&r)]; - - assert_eq!( - q_x.evaluate(&r), - w.evaluate(&point).unwrap(), - "q(r) != w(l(r))" - ); - } - Ok(()) - } - - 
#[test] - fn test_qx_with_prefix() -> Result<(), PCSError> { - // Example from page 53: - // W1 = 3x1x2 + 2x2 - let w_eval = vec![Fr::zero(), Fr::from(2u64), Fr::zero(), Fr::from(5u64)]; - let w1 = Rc::new(DenseMultilinearExtension::from_evaluations_vec(2, w_eval)); - - // W2 = x1x2 + x1 - let w_eval = vec![Fr::zero(), Fr::zero(), Fr::from(1u64), Fr::from(2u64)]; - let w2 = Rc::new(DenseMultilinearExtension::from_evaluations_vec(2, w_eval)); - - // W3 = x1 + x2 - let w_eval = vec![Fr::zero(), Fr::one(), Fr::from(1u64), Fr::from(2u64)]; - let w3 = Rc::new(DenseMultilinearExtension::from_evaluations_vec(2, w_eval)); - - let r = Fr::from(42u64); - - // point 1 is [1, 2] - let point1 = vec![Fr::from(1u64), Fr::from(2u64)]; - - // point 2 is [3, 4] - let point2 = vec![Fr::from(3u64), Fr::from(4u64)]; - - // point 3 is [5, 6] - let point3 = vec![Fr::from(5u64), Fr::from(6u64)]; - - { - let domain = get_uni_domain::(2)?; - // w = (3x1x2 + 2x2)(1-x0) + (x1x2 + x1)x0 - // with evaluations: [0,2,0,5,0,0,1,2] - let w = merge_polynomials(&[w1.clone(), w2.clone()])?; - - let l = build_l(&[point1.clone(), point2.clone()], &domain, true)?; - - // sage: P. = PolynomialRing(ZZ) - // sage: l0 = -1/2 * x + 1/2 - // sage: l1 = -x + 2 - // sage: l2 = -x + 3 - // sage: w = (3 * l1 * l2 + 2 * l2) * (1-l0) + (l1 * l2 + l1) * l0 - // sage: w - // x^3 - 7/2*x^2 - 7/2*x + 16 - // - // q(x) = x^3 - 7/2*x^2 - 7/2*x + 16 - let q_x = compute_w_circ_l(&w, &l, 2, true)?; - - let point: Vec = l.iter().map(|poly| poly.evaluate(&r)).collect(); - - assert_eq!( - q_x.evaluate(&r), - w.evaluate(&point).unwrap(), - "q(r) != w(l(r))" - ); - } - - { - let domain = get_uni_domain::(3)?; - let w = merge_polynomials(&[w1, w2, w3])?; - - let l = build_l(&[point1, point2, point3], &domain, true)?; - let q_x = compute_w_circ_l(&w, &l, 3, true)?; - - let point: Vec = vec![ - l[0].evaluate(&r), - l[1].evaluate(&r), - l[2].evaluate(&r), - l[3].evaluate(&r), - ]; - - assert_eq!( - q_x.evaluate(&r), - w.evaluate(&point).unwrap(), - "q(r) != w(l(r))" - ); - } - Ok(()) - } -} diff --git a/poly-iop/src/prelude.rs b/poly-iop/src/prelude.rs deleted file mode 100644 index 36ebba1..0000000 --- a/poly-iop/src/prelude.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub use crate::{ - errors::PolyIOPErrors, perm_check::PermutationCheck, prod_check::ProductCheck, - sum_check::SumCheck, utils::*, zero_check::ZeroCheck, PolyIOP, -}; diff --git a/scripts/run_benchmarks.m4 b/scripts/run_benchmarks.m4 deleted file mode 100755 index 26df9f8..0000000 --- a/scripts/run_benchmarks.m4 +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# m4_ignore( -echo "This is just a script template, not the script (yet) - pass it to 'argbash' to fix this." 
>&2 -exit 11 #)Created by argbash-init v2.10.0 -# ARG_OPTIONAL_BOOLEAN([asm]) -# ARG_OPTIONAL_BOOLEAN([multi_threads]) -# ARG_HELP([]) -# ARGBASH_GO - -# [ <-- needed because of Argbash - -if [ "$_arg_multi_threads" = on ] -then - echo "Multi-threads: ON" - # Do nothing -else - echo "Multi-threads: OFF" - export RAYON_NUM_THREADS=1 -fi - -if [ "$_arg_asm" = on ] -then - echo "Asm feature: ON" - export RUSTFLAGS="-C target-feature=+bmi2,+adx" -else - echo "Asm feature: OFF" - # Do nothing -fi - -# Run the benchmark binary -cargo +nightly bench - - -# ^^^ TERMINATE YOUR CODE BEFORE THE BOTTOM ARGBASH MARKER ^^^ - -# ] <-- needed because of Argbash diff --git a/scripts/run_benchmarks.sh b/scripts/run_benchmarks.sh index 72d554a..4a64a3e 100755 --- a/scripts/run_benchmarks.sh +++ b/scripts/run_benchmarks.sh @@ -1,106 +1,12 @@ -#!/usr/bin/env bash +#!/bin/bash -# Created by argbash-init v2.10.0 -# ARG_OPTIONAL_BOOLEAN([asm]) -# ARG_OPTIONAL_BOOLEAN([multi_threads]) -# ARG_HELP([]) -# ARGBASH_GO() -# needed because of Argbash --> m4_ignore([ -### START OF CODE GENERATED BY Argbash v2.10.0 one line above ### -# Argbash is a bash code generator used to get arguments parsing right. -# Argbash is FREE SOFTWARE, see https://argbash.io for more info - - -die() -{ - local _ret="${2:-1}" - test "${_PRINT_HELP:-no}" = yes && print_help >&2 - echo "$1" >&2 - exit "${_ret}" -} - - -begins_with_short_option() -{ - local first_option all_short_options='h' - first_option="${1:0:1}" - test "$all_short_options" = "${all_short_options/$first_option/}" && return 1 || return 0 -} - -# THE DEFAULTS INITIALIZATION - OPTIONALS -_arg_asm="off" -_arg_multi_threads="off" - - -print_help() -{ - printf '%s\n' "" - printf 'Usage: %s [--(no-)asm] [--(no-)multi_threads] [-h|--help]\n' "$0" - printf '\t%s\n' "-h, --help: Prints help" -} - - -parse_commandline() -{ - while test $# -gt 0 - do - _key="$1" - case "$_key" in - --no-asm|--asm) - _arg_asm="on" - test "${1:0:5}" = "--no-" && _arg_asm="off" - ;; - --no-multi_threads|--multi_threads) - _arg_multi_threads="on" - test "${1:0:5}" = "--no-" && _arg_multi_threads="off" - ;; - -h|--help) - print_help - exit 0 - ;; - -h*) - print_help - exit 0 - ;; - *) - _PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1 - ;; - esac - shift - done -} - -parse_commandline "$@" - -# OTHER STUFF GENERATED BY Argbash - -### END OF CODE GENERATED BY Argbash (sortof) ### ]) -# [ <-- needed because of Argbash - -cargo clean - -if [ "$_arg_multi_threads" = on ] -then - echo "Multi-threads: ON" - # Do nothing -else - echo "Multi-threads: OFF" - export RAYON_NUM_THREADS=1 -fi - -if [ "$_arg_asm" = on ] -then - echo "Asm feature: ON" - export RUSTFLAGS="-C target-feature=+bmi2,+adx" -else - echo "Asm feature: OFF" - # Do nothing -fi +cd hyperplonk # Run the benchmark binary -cargo bench - - -# ^^^ TERMINATE YOUR CODE BEFORE THE BOTTOM ARGBASH MARKER ^^^ - -# ] <-- needed because of Argbash +cargo bench 64 --no-default-features --features=bench +cargo bench 32 --no-default-features --features=bench +cargo bench 16 --no-default-features --features=bench +cargo bench 8 --no-default-features --features=bench +cargo bench 4 --no-default-features --features=bench +cargo bench 2 --no-default-features --features=bench +cargo bench 1 --no-default-features --features=bench \ No newline at end of file diff --git a/poly-iop/Cargo.toml b/subroutines/Cargo.toml similarity index 76% rename from poly-iop/Cargo.toml rename to subroutines/Cargo.toml index 8f9252d..3599eaa 100644 --- 
a/poly-iop/Cargo.toml +++ b/subroutines/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "poly-iop" +name = "subroutines" version = "0.1.0" edition = "2021" @@ -17,36 +17,39 @@ ark-bls12-381 = { version = "0.3.0", default-features = false, features = [ "cur rand_chacha = { version = "0.3.0", default-features = false } displaydoc = { version = "0.2.3", default-features = false } rayon = { version = "1.5.2", default-features = false, optional = true } +derivative = { version = "2", features = ["use_core"] } +itertools = { version = "0.10.4", optional = true } transcript = { path = "../transcript" } arithmetic = { path = "../arithmetic" } -pcs = { path = "../pcs" } util = { path = "../util" } -[dev-dependencies] -ark-ec = { version = "^0.3.0", default-features = false } +# # Benchmarks +# [[bench]] +# name = "poly-iop-benches" +# path = "benches/iop_bench.rs" +# harness = false # Benchmarks [[bench]] -name = "poly-iop-benches" -path = "benches/bench.rs" +name = "pcs-benches" +path = "benches/pcs_bench.rs" harness = false [features] # default = [ "parallel", "print-trace" ] default = [ "parallel" ] # extensive sanity checks that are useful for debugging -extensive_sanity_checks = [ - "pcs/extensive_sanity_checks", - ] +extensive_sanity_checks = [ ] parallel = [ "rayon", - "arithmetic/parallel", + "itertools", "ark-std/parallel", "ark-ff/parallel", "ark-poly/parallel", - "pcs/parallel", - "util/parallel" + "ark-ec/parallel", + "util/parallel", + "arithmetic/parallel", ] print-trace = [ "arithmetic/print-trace", diff --git a/poly-iop/benches/bench.rs b/subroutines/benches/iop_bench.rs similarity index 91% rename from poly-iop/benches/bench.rs rename to subroutines/benches/iop_bench.rs index e7e6061..7547aac 100644 --- a/poly-iop/benches/bench.rs +++ b/subroutines/benches/iop_bench.rs @@ -2,11 +2,13 @@ use arithmetic::{identity_permutation_mle, VPAuxInfo, VirtualPolynomial}; use ark_bls12_381::{Bls12_381, Fr}; use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; use ark_std::test_rng; -use pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}; -use poly_iop::prelude::{ - PermutationCheck, PolyIOP, PolyIOPErrors, ProductCheck, SumCheck, ZeroCheck, -}; use std::{marker::PhantomData, rc::Rc, time::Instant}; +use subroutines::{ + pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}, + poly_iop::prelude::{ + PermutationCheck, PolyIOP, PolyIOPErrors, ProductCheck, SumCheck, ZeroCheck, + }, +}; type KZG = MultilinearKzgPCS; @@ -57,16 +59,12 @@ fn bench_sum_check() -> Result<(), PolyIOPErrors> { for _ in 0..repetition { let mut transcript = as SumCheck>::init_transcript(); - let subclaim = as SumCheck>::verify( + let _subclaim = as SumCheck>::verify( asserted_sum, &proof, &poly_info, &mut transcript, )?; - assert!( - poly.evaluate(&subclaim.point).unwrap() == subclaim.expected_evaluation, - "wrong subclaim" - ); } println!( "sum check verification time for {} variables and {} degree: {} ns", @@ -115,12 +113,8 @@ fn bench_zero_check() -> Result<(), PolyIOPErrors> { let start = Instant::now(); let mut transcript = as ZeroCheck>::init_transcript(); transcript.append_message(b"testing", b"initializing transcript for testing")?; - let zero_subclaim = + let _zero_subclaim = as ZeroCheck>::verify(&proof, &poly_info, &mut transcript)?; - assert!( - poly.evaluate(&zero_subclaim.point)? 
== zero_subclaim.expected_evaluation, - "wrong subclaim" - ); println!( "zero check verification time for {} variables and {} degree: {} ns", nv, @@ -140,7 +134,7 @@ fn bench_permutation_check() -> Result<(), PolyIOPErrors> { for nv in 4..20 { let srs = KZG::gen_srs_for_testing(&mut rng, nv + 1)?; - let (pcs_param, _) = KZG::trim(&srs, nv + 1, Some(nv + 1))?; + let (pcs_param, _) = KZG::trim(&srs, None, Some(nv + 1))?; let repetition = if nv < 10 { 100 @@ -211,7 +205,7 @@ fn bench_prod_check() -> Result<(), PolyIOPErrors> { for nv in 4..20 { let srs = KZG::gen_srs_for_testing(&mut rng, nv + 1)?; - let (pcs_param, _) = KZG::trim(&srs, nv + 1, Some(nv + 1))?; + let (pcs_param, _) = KZG::trim(&srs, None, Some(nv + 1))?; let repetition = if nv < 10 { 100 diff --git a/pcs/benches/bench.rs b/subroutines/benches/pcs_bench.rs similarity index 89% rename from pcs/benches/bench.rs rename to subroutines/benches/pcs_bench.rs index c9e40b0..87bb49f 100644 --- a/pcs/benches/bench.rs +++ b/subroutines/benches/pcs_bench.rs @@ -2,11 +2,11 @@ use ark_bls12_381::{Bls12_381, Fr}; use ark_ff::UniformRand; use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; use ark_std::{rc::Rc, test_rng}; -use pcs::{ +use std::time::Instant; +use subroutines::pcs::{ prelude::{MultilinearKzgPCS, PCSError, PolynomialCommitmentScheme}, StructuredReferenceString, }; -use std::time::Instant; fn main() -> Result<(), PCSError> { bench_pcs() @@ -16,22 +16,19 @@ fn bench_pcs() -> Result<(), PCSError> { let mut rng = test_rng(); // normal polynomials - let uni_params = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 18)?; + let uni_params = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 24)?; - for nv in 4..19 { + for nv in 4..25 { let repetition = if nv < 10 { - 100 + 10 } else if nv < 20 { - 50 + 5 } else { - 10 + 2 }; let poly = Rc::new(DenseMultilinearExtension::rand(nv, &mut rng)); - let (ml_ck, ml_vk) = uni_params.0.trim(nv)?; - let (uni_ck, uni_vk) = uni_params.1.trim(nv)?; - let ck = (ml_ck, uni_ck); - let vk = (ml_vk, uni_vk); + let (ck, vk) = uni_params.trim(nv)?; let point: Vec<_> = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); diff --git a/subroutines/src/lib.rs b/subroutines/src/lib.rs new file mode 100644 index 0000000..d9fd674 --- /dev/null +++ b/subroutines/src/lib.rs @@ -0,0 +1,5 @@ +pub mod pcs; +pub mod poly_iop; + +pub use pcs::prelude::*; +pub use poly_iop::prelude::*; diff --git a/pcs/src/errors.rs b/subroutines/src/pcs/errors.rs similarity index 100% rename from pcs/src/errors.rs rename to subroutines/src/pcs/errors.rs diff --git a/pcs/src/lib.rs b/subroutines/src/pcs/mod.rs similarity index 76% rename from pcs/src/lib.rs rename to subroutines/src/pcs/mod.rs index 4331616..7a3eddb 100644 --- a/pcs/src/lib.rs +++ b/subroutines/src/pcs/mod.rs @@ -11,6 +11,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::rand::{CryptoRng, RngCore}; use errors::PCSError; use std::{borrow::Borrow, fmt::Debug, hash::Hash}; +use transcript::IOPTranscript; /// This trait defines APIs for polynomial commitment schemes. /// Note that for our usage of PCS, we do not require the hiding property. 
@@ -35,12 +36,10 @@ pub trait PolynomialCommitmentScheme { type Evaluation: Field; /// Commitments type Commitment: Clone + CanonicalSerialize + CanonicalDeserialize + Debug + PartialEq + Eq; - /// Batch commitments - type BatchCommitment: Clone + CanonicalSerialize + CanonicalDeserialize + Debug + PartialEq + Eq; /// Proofs type Proof: Clone + CanonicalSerialize + CanonicalDeserialize + Debug + PartialEq + Eq; /// Batch proofs - type BatchProof: Clone + CanonicalSerialize + CanonicalDeserialize + Debug + PartialEq + Eq; + type BatchProof; /// Build SRS for testing. /// @@ -67,7 +66,7 @@ pub trait PolynomialCommitmentScheme { /// ..)` etc. fn trim( srs: impl Borrow, - supported_degree: usize, + supported_degree: Option, supported_num_vars: Option, ) -> Result<(Self::ProverParam, Self::VerifierParam), PCSError>; @@ -85,12 +84,6 @@ pub trait PolynomialCommitmentScheme { poly: &Self::Polynomial, ) -> Result; - /// Generate a commitment for a list of polynomials - fn multi_commit( - prover_param: impl Borrow, - polys: &[Self::Polynomial], - ) -> Result; - /// On input a polynomial `p` and a point `point`, outputs a proof for the /// same. fn open( @@ -102,20 +95,16 @@ pub trait PolynomialCommitmentScheme { /// Input a list of multilinear extensions, and a same number of points, and /// a transcript, compute a multi-opening for all the polynomials. fn multi_open( - prover_param: impl Borrow, - multi_commitment: &Self::BatchCommitment, - polynomials: &[Self::Polynomial], - points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError>; - - /// Input a multilinear extension, and a number of points, and - /// a transcript, compute a multi-opening for all the polynomials. - fn multi_open_single_poly( - prover_param: impl Borrow, - commitment: &Self::Commitment, - polynomials: &Self::Polynomial, - points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError>; + _prover_param: impl Borrow, + _polynomials: &[Self::Polynomial], + _points: &[Self::Point], + _evals: &[Self::Evaluation], + _transcript: &mut IOPTranscript, + ) -> Result { + // the reason we use unimplemented!() is to enable developers to implement the + // trait without always implementing the batching APIs. + unimplemented!() + } /// Verifies that `value` is the evaluation at `x` of the polynomial /// committed inside `comm`. @@ -129,24 +118,17 @@ pub trait PolynomialCommitmentScheme { /// Verifies that `value_i` is the evaluation at `x_i` of the polynomial /// `poly_i` committed inside `comm`. - fn batch_verify( - verifier_param: &Self::VerifierParam, - multi_commitment: &Self::BatchCommitment, - points: &[Self::Point], - values: &[E::Fr], - batch_proof: &Self::BatchProof, - rng: &mut R, - ) -> Result; - - /// Verifies that `value_i` is the evaluation at `x_i` of the polynomial - /// `poly` committed inside `comm`. - fn batch_verify_single_poly( - verifier_param: &Self::VerifierParam, - commitment: &Self::Commitment, - points: &[Self::Point], - values: &[E::Fr], - batch_proof: &Self::BatchProof, - ) -> Result; + fn batch_verify( + _verifier_param: &Self::VerifierParam, + _commitments: &[Self::Commitment], + _points: &[Self::Point], + _batch_proof: &Self::BatchProof, + _transcript: &mut IOPTranscript, + ) -> Result { + // the reason we use unimplemented!() is to enable developers to implement the + // trait without always implementing the batching APIs. 
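+        // A minimal usage sketch of the two batching entry points (identifiers such as
+        // `ck`, `vk`, `polys`, `points`, `evals` and `commits` are illustrative only, not
+        // part of this trait); prover and verifier must drive their transcripts identically
+        // up to this call for the batch proof to verify:
+        //
+        //   let mut t = IOPTranscript::<E::Fr>::new(b"example");
+        //   let batch_proof = Self::multi_open(&ck, &polys, &points, &evals, &mut t)?;
+        //   ...
+        //   let mut t = IOPTranscript::<E::Fr>::new(b"example");
+        //   let ok = Self::batch_verify(&vk, &commits, &points, &batch_proof, &mut t)?;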
+ unimplemented!() + } } /// API definitions for structured reference string diff --git a/subroutines/src/pcs/multilinear_kzg/batching.rs b/subroutines/src/pcs/multilinear_kzg/batching.rs new file mode 100644 index 0000000..17b855b --- /dev/null +++ b/subroutines/src/pcs/multilinear_kzg/batching.rs @@ -0,0 +1,329 @@ +//! Sumcheck based batch opening and verify commitment. +// TODO: refactoring this code to somewhere else +// currently IOP depends on PCS because perm check requires commitment. +// The sumcheck based batch opening therefore cannot stay in the PCS repo -- +// which creates a cyclic dependency. + +use crate::{ + pcs::{ + multilinear_kzg::util::eq_eval, + prelude::{Commitment, PCSError}, + PolynomialCommitmentScheme, + }, + poly_iop::{prelude::SumCheck, PolyIOP}, + IOPProof, +}; +use arithmetic::{ + build_eq_x_r_vec, fix_last_variables, DenseMultilinearExtension, VPAuxInfo, VirtualPolynomial, +}; +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_std::{end_timer, log2, start_timer, One, Zero}; +use std::{marker::PhantomData, rc::Rc}; +use transcript::IOPTranscript; + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct BatchProof +where + E: PairingEngine, + PCS: PolynomialCommitmentScheme, +{ + /// A sum check proof proving tilde g's sum + pub(crate) sum_check_proof: IOPProof, + /// f_i(point_i) + pub f_i_eval_at_point_i: Vec, + /// proof for g'(a_2) + pub(crate) g_prime_proof: PCS::Proof, +} + +/// Steps: +/// 1. get challenge point t from transcript +/// 2. build eq(t,i) for i in [0..k] +/// 3. build \tilde g(i, b) = eq(t, i) * f_i(b) +/// 4. compute \tilde eq +/// 5. run sumcheck on \tilde eq * \tilde g(i, b) +/// 6. build g'(a2) where (a1, a2) is the sumcheck's point +pub(crate) fn multi_open_internal( + prover_param: &PCS::ProverParam, + polynomials: &[PCS::Polynomial], + points: &[PCS::Point], + evals: &[PCS::Evaluation], + transcript: &mut IOPTranscript, +) -> Result, PCSError> +where + E: PairingEngine, + PCS: PolynomialCommitmentScheme< + E, + Polynomial = Rc>, + Point = Vec, + Evaluation = E::Fr, + >, +{ + let open_timer = start_timer!(|| format!("multi open {} points", points.len())); + + // TODO: sanity checks + let num_var = polynomials[0].num_vars; + let k = polynomials.len(); + let ell = log2(k) as usize; + let merged_num_var = num_var + ell; + + // challenge point t + let t = transcript.get_and_append_challenge_vectors("t".as_ref(), ell)?; + + // eq(t, i) for i in [0..k] + let eq_t_i_list = build_eq_x_r_vec(t.as_ref())?; + + // \tilde g(i, b) = eq(t, i) * f_i(b) + let timer = start_timer!(|| format!("compute tilde g for {} points", points.len())); + let mut tilde_g_eval = vec![E::Fr::zero(); 1 << (ell + num_var)]; + let block_size = 1 << num_var; + for (index, f_i) in polynomials.iter().enumerate() { + for (j, &f_i_eval) in f_i.iter().enumerate() { + tilde_g_eval[index * block_size + j] = f_i_eval * eq_t_i_list[index]; + } + } + let tilde_g = Rc::new(DenseMultilinearExtension::from_evaluations_vec( + merged_num_var, + tilde_g_eval, + )); + end_timer!(timer); + + let timer = start_timer!(|| format!("compute tilde eq for {} points", points.len())); + let mut tilde_eq_eval = vec![E::Fr::zero(); 1 << (ell + num_var)]; + for (index, point) in points.iter().enumerate() { + let eq_b_zi = build_eq_x_r_vec(point)?; + let start = index * block_size; + tilde_eq_eval[start..start + block_size].copy_from_slice(eq_b_zi.as_slice()); + } + let tilde_eq = Rc::new(DenseMultilinearExtension::from_evaluations_vec( + merged_num_var, + tilde_eq_eval, + )); 
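+    // At this point \tilde g(<i>, b) = eq(t, <i>) * f_i(b) and \tilde eq(<i>, b) = eq(b, z_i),
+    // so their product summed over the boolean hypercube equals \sum_i eq(t, <i>) * f_i(z_i),
+    // i.e. the claimed sum that the verifier recomputes from the reported f_i evaluations.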
+ end_timer!(timer); + + // built the virtual polynomial for SumCheck + let timer = start_timer!(|| format!("sum check prove of {} variables", num_var + ell)); + + let step = start_timer!(|| "add mle"); + let mut sum_check_vp = VirtualPolynomial::new(num_var + ell); + sum_check_vp.add_mle_list([tilde_g.clone(), tilde_eq], E::Fr::one())?; + end_timer!(step); + + let proof = match as SumCheck>::prove(&sum_check_vp, transcript) { + Ok(p) => p, + Err(_e) => { + // cannot wrap IOPError with PCSError due to cyclic dependency + return Err(PCSError::InvalidProver( + "Sumcheck in batch proving Failed".to_string(), + )); + }, + }; + + end_timer!(timer); + + // (a1, a2) := sumcheck's point + let step = start_timer!(|| "open at a2"); + let a1 = &proof.point[num_var..]; + let a2 = &proof.point[..num_var]; + end_timer!(step); + + // build g'(a2) + let step = start_timer!(|| "evaluate at a2"); + let g_prime = Rc::new(fix_last_variables(&tilde_g, a1)); + end_timer!(step); + + let step = start_timer!(|| "pcs open"); + let (g_prime_proof, _g_prime_eval) = PCS::open(prover_param, &g_prime, a2.to_vec().as_ref())?; + // assert_eq!(g_prime_eval, tilde_g_eval); + end_timer!(step); + + let step = start_timer!(|| "evaluate fi(pi)"); + end_timer!(step); + end_timer!(open_timer); + + Ok(BatchProof { + sum_check_proof: proof, + f_i_eval_at_point_i: evals.to_vec(), + g_prime_proof, + }) +} + +/// Steps: +/// 1. get challenge point t from transcript +/// 2. build g' commitment +/// 3. ensure \sum_i eq(t, ) * f_i_evals matches the sum via SumCheck +/// verification 4. verify commitment +pub(crate) fn batch_verify_internal( + verifier_param: &PCS::VerifierParam, + f_i_commitments: &[Commitment], + points: &[PCS::Point], + proof: &BatchProof, + transcript: &mut IOPTranscript, +) -> Result +where + E: PairingEngine, + PCS: PolynomialCommitmentScheme< + E, + Polynomial = Rc>, + Point = Vec, + Evaluation = E::Fr, + Commitment = Commitment, + >, +{ + let open_timer = start_timer!(|| "batch verification"); + + // TODO: sanity checks + + let k = f_i_commitments.len(); + let ell = log2(k) as usize; + let num_var = proof.sum_check_proof.point.len() - ell; + + // challenge point t + let t = transcript.get_and_append_challenge_vectors("t".as_ref(), ell)?; + + // sum check point (a1, a2) + let a1 = &proof.sum_check_proof.point[num_var..]; + let a2 = &proof.sum_check_proof.point[..num_var]; + + // build g' commitment + let eq_a1_list = build_eq_x_r_vec(a1)?; + let eq_t_list = build_eq_x_r_vec(t.as_ref())?; + + let mut g_prime_commit = E::G1Affine::zero().into_projective(); + for i in 0..k { + let tmp = eq_a1_list[i] * eq_t_list[i]; + g_prime_commit += &f_i_commitments[i].0.mul(tmp); + } + + // ensure \sum_i eq(t, ) * f_i_evals matches the sum via SumCheck + // verification + let mut sum = E::Fr::zero(); + for (i, &e) in eq_t_list.iter().enumerate().take(k) { + sum += e * proof.f_i_eval_at_point_i[i]; + } + let aux_info = VPAuxInfo { + max_degree: 2, + num_variables: num_var + ell, + phantom: PhantomData, + }; + let subclaim = match as SumCheck>::verify( + sum, + &proof.sum_check_proof, + &aux_info, + transcript, + ) { + Ok(p) => p, + Err(_e) => { + // cannot wrap IOPError with PCSError due to cyclic dependency + return Err(PCSError::InvalidProver( + "Sumcheck in batch verification failed".to_string(), + )); + }, + }; + let mut eq_tilde_eval = E::Fr::zero(); + for (point, &coef) in points.iter().zip(eq_a1_list.iter()) { + eq_tilde_eval += coef * eq_eval(a2, point)?; + } + let tilde_g_eval = subclaim.expected_evaluation / 
eq_tilde_eval; + + // verify commitment + let res = PCS::verify( + verifier_param, + &Commitment(g_prime_commit.into_affine()), + a2.to_vec().as_ref(), + &tilde_g_eval, + &proof.g_prime_proof, + )?; + + end_timer!(open_timer); + Ok(res) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::pcs::{ + prelude::{MultilinearKzgPCS, MultilinearUniversalParams}, + StructuredReferenceString, + }; + use arithmetic::get_batched_nv; + use ark_bls12_381::Bls12_381 as E; + use ark_ec::PairingEngine; + use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; + use ark_std::{ + rand::{CryptoRng, RngCore}, + test_rng, + vec::Vec, + UniformRand, + }; + + type Fr = ::Fr; + + fn test_multi_open_helper( + ml_params: &MultilinearUniversalParams, + polys: &[Rc>], + rng: &mut R, + ) -> Result<(), PCSError> { + let merged_nv = get_batched_nv(polys[0].num_vars(), polys.len()); + let (ml_ck, ml_vk) = ml_params.trim(merged_nv)?; + + let mut points = Vec::new(); + for poly in polys.iter() { + let point = (0..poly.num_vars()) + .map(|_| Fr::rand(rng)) + .collect::>(); + points.push(point); + } + + let evals = polys + .iter() + .zip(points.iter()) + .map(|(f, p)| f.evaluate(p).unwrap()) + .collect::>(); + + let commitments = polys + .iter() + .map(|poly| MultilinearKzgPCS::commit(&ml_ck.clone(), poly).unwrap()) + .collect::>(); + + let mut transcript = IOPTranscript::new("test transcript".as_ref()); + transcript.append_field_element("init".as_ref(), &Fr::zero())?; + + let batch_proof = multi_open_internal::>( + &ml_ck, + polys, + &points, + &evals, + &mut transcript, + )?; + + // good path + let mut transcript = IOPTranscript::new("test transcript".as_ref()); + transcript.append_field_element("init".as_ref(), &Fr::zero())?; + assert!(batch_verify_internal::>( + &ml_vk, + &commitments, + &points, + &batch_proof, + &mut transcript + )?); + + Ok(()) + } + + #[test] + fn test_multi_open_internal() -> Result<(), PCSError> { + let mut rng = test_rng(); + + let ml_params = MultilinearUniversalParams::::gen_srs_for_testing(&mut rng, 20)?; + for num_poly in 5..6 { + for nv in 15..16 { + let polys1: Vec<_> = (0..num_poly) + .map(|_| Rc::new(DenseMultilinearExtension::rand(nv, &mut rng))) + .collect(); + test_multi_open_helper(&ml_params, &polys1, &mut rng)?; + } + } + + Ok(()) + } +} diff --git a/pcs/src/multilinear_kzg/mod.rs b/subroutines/src/pcs/multilinear_kzg/mod.rs similarity index 53% rename from pcs/src/multilinear_kzg/mod.rs rename to subroutines/src/pcs/multilinear_kzg/mod.rs index 0e88c98..cf8d7b2 100644 --- a/pcs/src/multilinear_kzg/mod.rs +++ b/subroutines/src/pcs/multilinear_kzg/mod.rs @@ -6,22 +6,15 @@ //! 
Main module for multilinear KZG commitment scheme -mod batching; +pub(crate) mod batching; pub(crate) mod srs; pub(crate) mod util; -use self::batching::{ - batch_verify_internal, batch_verify_same_poly_internal, multi_open_internal, - multi_open_same_poly_internal, -}; use crate::{ - prelude::{ - Commitment, UnivariateProverParam, UnivariateUniversalParams, UnivariateVerifierParam, - }, - univariate_kzg::UnivariateKzgProof, - PCSError, PolynomialCommitmentScheme, StructuredReferenceString, + pcs::{prelude::Commitment, PCSError, PolynomialCommitmentScheme, StructuredReferenceString}, + BatchProof, }; -use arithmetic::{evaluate_opt, merge_polynomials}; +use arithmetic::evaluate_opt; use ark_ec::{ msm::{FixedBaseMSM, VariableBaseMSM}, AffineCurve, PairingEngine, ProjectiveCurve, @@ -43,6 +36,9 @@ use ark_std::{ }; // use batching::{batch_verify_internal, multi_open_internal}; use srs::{MultilinearProverParam, MultilinearUniversalParams, MultilinearVerifierParam}; +use transcript::IOPTranscript; + +use self::batching::{batch_verify_internal, multi_open_internal}; /// KZG Polynomial Commitment Scheme on multilinear polynomials. pub struct MultilinearKzgPCS { @@ -57,36 +53,19 @@ pub struct MultilinearKzgProof { pub proofs: Vec, } -#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] -/// proof of batch opening -pub struct MultilinearKzgBatchProof { - /// The actual proof - pub proof: MultilinearKzgProof, - /// Commitment to q(x):= w(l(x)) where - /// - `w` is the merged MLE - /// - `l` is the list of univariate polys that goes through all points - pub q_x_commit: Commitment, - /// openings of q(x) at 1, omega, ..., and r - pub q_x_opens: Vec>, -} - impl PolynomialCommitmentScheme for MultilinearKzgPCS { // Parameters - type ProverParam = ( - MultilinearProverParam, - UnivariateProverParam, - ); - type VerifierParam = (MultilinearVerifierParam, UnivariateVerifierParam); - type SRS = (MultilinearUniversalParams, UnivariateUniversalParams); + type ProverParam = MultilinearProverParam; + type VerifierParam = MultilinearVerifierParam; + type SRS = MultilinearUniversalParams; // Polynomial and its associated types type Polynomial = Rc>; type Point = Vec; type Evaluation = E::Fr; // Commitments and proofs type Commitment = Commitment; - type BatchCommitment = Commitment; type Proof = MultilinearKzgProof; - type BatchProof = MultilinearKzgBatchProof; + type BatchProof = BatchProof; /// Build SRS for testing. /// @@ -99,10 +78,7 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { rng: &mut R, log_size: usize, ) -> Result { - Ok(( - MultilinearUniversalParams::::gen_srs_for_testing(rng, log_size)?, - UnivariateUniversalParams::::gen_srs_for_testing(rng, 1 << log_size)?, - )) + MultilinearUniversalParams::::gen_srs_for_testing(rng, log_size) } /// Trim the universal parameters to specialize the public parameters. @@ -110,9 +86,11 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { /// `supported_num_vars` for multilinear. 
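// With this change the degree bound is an Option and must be `None` for the
// multilinear scheme (the univariate scheme takes `Some(degree)` and `None` for the
// number of variables instead). A small usage sketch mirroring the calls in the test
// modules later in this patch; the function name and setup values are illustrative only.
use ark_bls12_381::Bls12_381;
use ark_std::test_rng;

fn trim_usage_sketch(nv: usize) -> Result<(), PCSError> {
    let mut rng = test_rng();
    let srs = MultilinearKzgPCS::<Bls12_381>::gen_srs_for_testing(&mut rng, nv + 1)?;
    // No univariate degree bound any more: pass `None`, plus the supported num_vars.
    let (_pcs_param, _verifier_param) =
        MultilinearKzgPCS::<Bls12_381>::trim(&srs, None, Some(nv + 1))?;
    Ok(())
}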
fn trim( srs: impl Borrow, - supported_degree: usize, + supported_degree: Option, supported_num_vars: Option, ) -> Result<(Self::ProverParam, Self::VerifierParam), PCSError> { + assert!(supported_degree.is_none()); + let supported_num_vars = match supported_num_vars { Some(p) => p, None => { @@ -121,10 +99,9 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { )) }, }; - let (uni_ck, uni_vk) = srs.borrow().1.trim(supported_degree)?; - let (ml_ck, ml_vk) = srs.borrow().0.trim(supported_num_vars)?; + let (ml_ck, ml_vk) = srs.borrow().trim(supported_num_vars)?; - Ok(((ml_ck, uni_ck), (ml_vk, uni_vk))) + Ok((ml_ck, ml_vk)) } /// Generate a commitment for a polynomial. @@ -137,48 +114,20 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { ) -> Result { let prover_param = prover_param.borrow(); let commit_timer = start_timer!(|| "commit"); - if prover_param.0.num_vars < poly.num_vars { + if prover_param.num_vars < poly.num_vars { return Err(PCSError::InvalidParameters(format!( "MlE length ({}) exceeds param limit ({})", - poly.num_vars, prover_param.0.num_vars + poly.num_vars, prover_param.num_vars ))); } - let ignored = prover_param.0.num_vars - poly.num_vars; + let ignored = prover_param.num_vars - poly.num_vars; let scalars: Vec<_> = poly .to_evaluations() .into_iter() .map(|x| x.into_repr()) .collect(); let commitment = VariableBaseMSM::multi_scalar_mul( - &prover_param.0.powers_of_g[ignored].evals, - scalars.as_slice(), - ) - .into_affine(); - - end_timer!(commit_timer); - Ok(Commitment(commitment)) - } - - /// Generate a commitment for a list of polynomials. - /// - /// This function takes `2^(num_vars + log(polys.len())` number of scalar - /// multiplications over G1. - fn multi_commit( - prover_param: impl Borrow, - polys: &[Self::Polynomial], - ) -> Result { - let prover_param = prover_param.borrow(); - let commit_timer = start_timer!(|| "multi commit"); - let poly = merge_polynomials(polys)?; - - let scalars: Vec<_> = poly - .to_evaluations() - .iter() - .map(|x| x.into_repr()) - .collect(); - - let commitment = VariableBaseMSM::multi_scalar_mul( - &prover_param.0.powers_of_g[0].evals, + &prover_param.powers_of_g[ignored].evals, scalars.as_slice(), ) .into_affine(); @@ -201,69 +150,24 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { polynomial: &Self::Polynomial, point: &Self::Point, ) -> Result<(Self::Proof, Self::Evaluation), PCSError> { - open_internal(&prover_param.borrow().0, polynomial, point) + open_internal(prover_param.borrow(), polynomial, point) } - /// Input - /// - the prover parameters for univariate KZG, - /// - the prover parameters for multilinear KZG, - /// - a list of multilinear extensions (MLEs), - /// - a commitment to all multilinear extensions, - /// - and a same number of points, - /// compute a multi-opening for all the polynomials. - /// - /// For simplicity, this API requires each MLE to have only one point. If - /// the caller wish to use more than one points per MLE, it should be - /// handled at the caller layer. - /// - /// Returns an error if the lengths do not match. - /// - /// Returns the proof, consists of - /// - the multilinear KZG opening - /// - the univariate KZG commitment to q(x) - /// - the openings and evaluations of q(x) at omega^i and r - /// - /// Steps: - /// 1. build `l(points)` which is a list of univariate polynomials that goes - /// through the points - /// 2. build MLE `w` which is the merge of all MLEs. - /// 3. build `q(x)` which is a univariate polynomial `W circ l` - /// 4. 
commit to q(x) and sample r from transcript - /// transcript contains: w commitment, points, q(x)'s commitment - /// 5. build q(omega^i) and their openings - /// 6. build q(r) and its opening - /// 7. get a point `p := l(r)` - /// 8. output an opening of `w` over point `p` - /// 9. output `w(p)` + /// Input a list of multilinear extensions, and a same number of points, and + /// a transcript, compute a multi-opening for all the polynomials. fn multi_open( prover_param: impl Borrow, - multi_commitment: &Self::BatchCommitment, polynomials: &[Self::Polynomial], points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError> { - multi_open_internal::( - &prover_param.borrow().1, - &prover_param.borrow().0, + evals: &[Self::Evaluation], + transcript: &mut IOPTranscript, + ) -> Result, PCSError> { + multi_open_internal( + prover_param.borrow(), polynomials, - multi_commitment, - points, - ) - } - - /// Input a multilinear extension, and a number of points, and - /// a transcript, compute a multi-opening for all the polynomials. - fn multi_open_single_poly( - prover_param: impl Borrow, - commitment: &Self::Commitment, - polynomial: &Self::Polynomial, - points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError> { - multi_open_same_poly_internal::( - &prover_param.borrow().1, - &prover_param.borrow().0, - polynomial, - commitment, points, + evals, + transcript, ) } @@ -280,55 +184,19 @@ impl PolynomialCommitmentScheme for MultilinearKzgPCS { value: &E::Fr, proof: &Self::Proof, ) -> Result { - verify_internal(&verifier_param.0, commitment, point, value, proof) - } - - /// Verifies that `value` is the evaluation at `x_i` of the polynomial - /// `poly_i` committed inside `comm`. - /// steps: - /// - /// 1. put `q(x)`'s evaluations over `(1, omega,...)` into transcript - /// 2. sample `r` from transcript - /// 3. check `q(r) == value` - /// 4. build `l(points)` which is a list of univariate polynomials that goes - /// through the points - /// 5. get a point `p := l(r)` - /// 6. verifies `p` is verifies against proof - fn batch_verify( - verifier_param: &Self::VerifierParam, - multi_commitment: &Self::BatchCommitment, - points: &[Self::Point], - values: &[E::Fr], - batch_proof: &Self::BatchProof, - _rng: &mut R, - ) -> Result { - batch_verify_internal( - &verifier_param.1, - &verifier_param.0, - multi_commitment, - points, - values, - batch_proof, - ) + verify_internal(verifier_param, commitment, point, value, proof) } /// Verifies that `value_i` is the evaluation at `x_i` of the polynomial - /// `poly` committed inside `comm`. - fn batch_verify_single_poly( + /// `poly_i` committed inside `comm`. 
+ fn batch_verify( verifier_param: &Self::VerifierParam, - commitment: &Self::Commitment, + commitments: &[Self::Commitment], points: &[Self::Point], - values: &[E::Fr], batch_proof: &Self::BatchProof, + transcript: &mut IOPTranscript, ) -> Result { - batch_verify_same_poly_internal( - &verifier_param.1, - &verifier_param.0, - commitment, - points, - values, - batch_proof, - ) + batch_verify_internal(verifier_param, commitments, points, batch_proof, transcript) } } @@ -478,25 +346,22 @@ fn verify_internal( #[cfg(test)] mod tests { use super::*; - use crate::multilinear_kzg::util::compute_qx_degree; - use arithmetic::get_batched_nv; use ark_bls12_381::Bls12_381; use ark_ec::PairingEngine; use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; - use ark_std::{log2, rand::RngCore, test_rng, vec::Vec, UniformRand}; + use ark_std::{rand::RngCore, test_rng, vec::Vec, UniformRand}; type E = Bls12_381; type Fr = ::Fr; fn test_single_helper( - params: &(MultilinearUniversalParams, UnivariateUniversalParams), + params: &MultilinearUniversalParams, poly: &Rc>, rng: &mut R, ) -> Result<(), PCSError> { let nv = poly.num_vars(); assert_ne!(nv, 0); - let uni_degree = 1; - let (ck, vk) = MultilinearKzgPCS::trim(params, uni_degree, Some(nv + 1))?; + let (ck, vk) = MultilinearKzgPCS::trim(params, None, Some(nv + 1))?; let point: Vec<_> = (0..nv).map(|_| Fr::rand(rng)).collect(); let com = MultilinearKzgPCS::commit(&ck, poly)?; let (proof, value) = MultilinearKzgPCS::open(&ck, poly, &point)?; @@ -537,108 +402,4 @@ mod tests { // normal polynomials assert!(MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 0).is_err()); } - - fn test_multi_open_single_poly_helper( - params: &(MultilinearUniversalParams, UnivariateUniversalParams), - poly: Rc>, - num_open: usize, - rng: &mut R, - ) -> Result<(), PCSError> { - let nv = poly.num_vars(); - assert_ne!(nv, 0); - let uni_degree = 1024; - let (ck, vk) = MultilinearKzgPCS::trim(params, uni_degree, Some(nv + 1))?; - let mut points = vec![]; - for _ in 0..num_open { - let point: Vec<_> = (0..nv).map(|_| Fr::rand(rng)).collect(); - points.push(point) - } - let com = MultilinearKzgPCS::commit(&ck, &poly)?; - let (proof, mut values) = - MultilinearKzgPCS::multi_open_single_poly(&ck, &com, &poly, &points)?; - for (a, b) in values.iter().zip(points.iter()) { - let p = poly.evaluate(&b).unwrap(); - assert_eq!(*a, p); - } - - assert!(MultilinearKzgPCS::batch_verify_single_poly( - &vk, &com, &points, &values, &proof - )?); - - values[0] = Fr::rand(rng); - assert!(!MultilinearKzgPCS::batch_verify_single_poly( - &vk, &com, &points, &values, &proof - )?); - Ok(()) - } - - #[test] - fn test_multi_open_single_poly() -> Result<(), PCSError> { - let mut rng = test_rng(); - - let params = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 15)?; - - for nv in 1..10 { - for num_open in 2..10 { - let poly1 = Rc::new(DenseMultilinearExtension::rand(nv, &mut rng)); - test_multi_open_single_poly_helper(¶ms, poly1, num_open, &mut rng)?; - } - } - - Ok(()) - } - - fn test_multi_open_helper( - params: &(MultilinearUniversalParams, UnivariateUniversalParams), - polys: &[Rc>], - num_open: usize, - rng: &mut R, - ) -> Result<(), PCSError> { - let nv = polys[0].num_vars(); - assert_ne!(nv, 0); - let merged_nv = get_batched_nv(nv, polys.len()); - let qx_degree = compute_qx_degree(merged_nv, polys.len()); - let padded_qx_degree = 1usize << log2(qx_degree); - - let (ck, vk) = MultilinearKzgPCS::trim(params, padded_qx_degree, Some(merged_nv))?; - let mut points = vec![]; - for _ in 
0..num_open { - let point: Vec<_> = (0..nv).map(|_| Fr::rand(rng)).collect(); - points.push(point) - } - let com = MultilinearKzgPCS::multi_commit(&ck, &polys)?; - let (proof, mut values) = MultilinearKzgPCS::multi_open(&ck, &com, polys, &points)?; - - assert!(MultilinearKzgPCS::batch_verify( - &vk, &com, &points, &values, &proof, rng - )?); - - values[0] = Fr::rand(rng); - assert!(!MultilinearKzgPCS::batch_verify_single_poly( - &vk, &com, &points, &values, &proof - )?); - - Ok(()) - } - - #[test] - fn test_multi_open() -> Result<(), PCSError> { - let mut rng = test_rng(); - - let params = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, 15)?; - - // normal polynomials - for nv in 1..10 { - for num_open in 1..4 { - let mut polys = vec![]; - for _ in 0..num_open { - let poly = Rc::new(DenseMultilinearExtension::rand(nv, &mut rng)); - polys.push(poly) - } - - test_multi_open_helper(¶ms, &polys, num_open, &mut rng)?; - } - } - Ok(()) - } } diff --git a/pcs/src/multilinear_kzg/srs.rs b/subroutines/src/pcs/multilinear_kzg/srs.rs similarity index 90% rename from pcs/src/multilinear_kzg/srs.rs rename to subroutines/src/pcs/multilinear_kzg/srs.rs index c24fbdd..e82d57e 100644 --- a/pcs/src/multilinear_kzg/srs.rs +++ b/subroutines/src/pcs/multilinear_kzg/srs.rs @@ -5,7 +5,9 @@ // along with the Jellyfish library. If not, see . //! Implementing Structured Reference Strings for multilinear polynomial KZG -use crate::{prelude::PCSError, StructuredReferenceString}; +use crate::pcs::{ + multilinear_kzg::util::eq_extension, prelude::PCSError, StructuredReferenceString, +}; use ark_ec::{msm::FixedBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve}; use ark_ff::{Field, PrimeField}; use ark_poly::DenseMultilinearExtension; @@ -232,28 +234,6 @@ fn remove_dummy_variable(poly: &[F], pad: usize) -> Result, PCS Ok((0..(1 << nv)).map(|x| poly[x << pad]).collect()) } -/// Generate eq(t,x), a product of multilinear polynomials with fixed t. -/// eq(a,b) is takes extensions of a,b in {0,1}^num_vars such that if a and b in -/// {0,1}^num_vars are equal then this polynomial evaluates to 1. -fn eq_extension(t: &[F]) -> Vec> { - let start = start_timer!(|| "eq extension"); - - let dim = t.len(); - let mut result = Vec::new(); - for (i, &ti) in t.iter().enumerate().take(dim) { - let mut poly = Vec::with_capacity(1 << dim); - for x in 0..(1 << dim) { - let xi = if x >> i & 1 == 1 { F::one() } else { F::zero() }; - let ti_xi = ti * xi; - poly.push(ti_xi + ti_xi - xi - ti + F::one()); - } - result.push(DenseMultilinearExtension::from_evaluations_vec(dim, poly)); - } - - end_timer!(start); - result -} - #[cfg(test)] mod tests { use super::*; diff --git a/subroutines/src/pcs/multilinear_kzg/util.rs b/subroutines/src/pcs/multilinear_kzg/util.rs new file mode 100644 index 0000000..0de3ea3 --- /dev/null +++ b/subroutines/src/pcs/multilinear_kzg/util.rs @@ -0,0 +1,51 @@ +// Copyright (c) 2022 Espresso Systems (espressosys.com) +// This file is part of the Jellyfish library. + +// You should have received a copy of the MIT License +// along with the Jellyfish library. If not, see . + +//! Useful utilities for KZG PCS +use ark_ff::PrimeField; +use ark_poly::DenseMultilinearExtension; +use ark_std::{end_timer, start_timer, vec::Vec}; + +use crate::PCSError; + +/// Generate eq(t,x), a product of multilinear polynomials with fixed t. +/// eq(a,b) is takes extensions of a,b in {0,1}^num_vars such that if a and b in +/// {0,1}^num_vars are equal then this polynomial evaluates to 1. 
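// Each factor produced below is 2*t_i*x_i - t_i - x_i + 1 = t_i*x_i + (1 - t_i)*(1 - x_i),
// so the product over all i is the multilinear eq(t, x): on the Boolean cube it equals 1
// exactly when x = t. A tiny self-contained check of that identity over a toy prime field
// (names `P`, `eq_factor`, `eq_eval_toy` are illustrative, not crate code):
const P: u64 = 101;

fn eq_factor(t: u64, x: u64) -> u64 {
    // 2*t*x - t - x + 1  (mod P), written to avoid u64 underflow
    (2 * t % P * x % P + 2 * P - t - x + 1) % P
}

fn eq_eval_toy(t: &[u64], x: &[u64]) -> u64 {
    t.iter().zip(x).fold(1, |acc, (&ti, &xi)| acc * eq_factor(ti, xi) % P)
}

fn main() {
    let t: [u64; 3] = [1, 0, 1];
    assert_eq!(eq_eval_toy(&t, &[1, 0, 1]), 1); // x == t  -> 1
    assert_eq!(eq_eval_toy(&t, &[0, 0, 1]), 0); // x != t  -> 0
}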
+pub(crate) fn eq_extension(t: &[F]) -> Vec> { + let start = start_timer!(|| "eq extension"); + + let dim = t.len(); + let mut result = Vec::new(); + for (i, &ti) in t.iter().enumerate().take(dim) { + let mut poly = Vec::with_capacity(1 << dim); + for x in 0..(1 << dim) { + let xi = if x >> i & 1 == 1 { F::one() } else { F::zero() }; + let ti_xi = ti * xi; + poly.push(ti_xi + ti_xi - xi - ti + F::one()); + } + result.push(DenseMultilinearExtension::from_evaluations_vec(dim, poly)); + } + + end_timer!(start); + result +} + +/// Evaluate eq polynomial. use the public one later +pub(crate) fn eq_eval(x: &[F], y: &[F]) -> Result { + if x.len() != y.len() { + return Err(PCSError::InvalidParameters( + "x and y have different length".to_string(), + )); + } + let start = start_timer!(|| "eq_eval"); + let mut res = F::one(); + for (&xi, &yi) in x.iter().zip(y.iter()) { + let xi_yi = xi * yi; + res *= xi_yi + xi_yi - xi - yi + F::one(); + } + end_timer!(start); + Ok(res) +} diff --git a/pcs/src/prelude.rs b/subroutines/src/pcs/prelude.rs similarity index 84% rename from pcs/src/prelude.rs rename to subroutines/src/pcs/prelude.rs index c5623d0..be35260 100644 --- a/pcs/src/prelude.rs +++ b/subroutines/src/pcs/prelude.rs @@ -5,12 +5,12 @@ // along with the Jellyfish library. If not, see . //! Prelude -pub use crate::{ +pub use crate::pcs::{ errors::PCSError, multilinear_kzg::{ + batching::BatchProof, srs::{MultilinearProverParam, MultilinearUniversalParams, MultilinearVerifierParam}, - util::compute_qx_degree, - MultilinearKzgBatchProof, MultilinearKzgPCS, MultilinearKzgProof, + MultilinearKzgPCS, MultilinearKzgProof, }, structs::Commitment, univariate_kzg::{ diff --git a/pcs/readme.md b/subroutines/src/pcs/readme.md similarity index 100% rename from pcs/readme.md rename to subroutines/src/pcs/readme.md diff --git a/pcs/src/structs.rs b/subroutines/src/pcs/structs.rs similarity index 100% rename from pcs/src/structs.rs rename to subroutines/src/pcs/structs.rs diff --git a/pcs/src/univariate_kzg/mod.rs b/subroutines/src/pcs/univariate_kzg/mod.rs similarity index 55% rename from pcs/src/univariate_kzg/mod.rs rename to subroutines/src/pcs/univariate_kzg/mod.rs index 1c7faed..9fa2010 100644 --- a/pcs/src/univariate_kzg/mod.rs +++ b/subroutines/src/pcs/univariate_kzg/mod.rs @@ -6,7 +6,9 @@ //! Main module for univariate KZG commitment scheme -use crate::{prelude::Commitment, PCSError, PolynomialCommitmentScheme, StructuredReferenceString}; +use crate::pcs::{ + prelude::Commitment, PCSError, PolynomialCommitmentScheme, StructuredReferenceString, +}; use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve}; use ark_ff::PrimeField; use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; @@ -20,12 +22,9 @@ use ark_std::{ string::ToString, vec, vec::Vec, - One, UniformRand, Zero, + One, }; -#[cfg(feature = "parallel")] -use rayon::prelude::*; use srs::{UnivariateProverParam, UnivariateUniversalParams, UnivariateVerifierParam}; -use util::parallelizable_slice_iter; pub(crate) mod srs; @@ -55,9 +54,10 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { type Evaluation = E::Fr; // Polynomial and its associated types type Commitment = Commitment; - type BatchCommitment = Vec; type Proof = UnivariateKzgProof; - type BatchProof = UnivariateKzgBatchProof; + + // We do not implement batch univariate KZG at the current version. + type BatchProof = (); /// Build SRS for testing. 
/// @@ -77,15 +77,16 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { /// `supported_num_vars` must be None or an error is returned. fn trim( srs: impl Borrow, - supported_degree: usize, + supported_degree: Option, supported_num_vars: Option, ) -> Result<(Self::ProverParam, Self::VerifierParam), PCSError> { + assert!(supported_num_vars.is_none()); if supported_num_vars.is_some() { return Err(PCSError::InvalidParameters( "univariate should not receive a num_var param".to_string(), )); } - srs.borrow().trim(supported_degree) + srs.borrow().trim(supported_degree.unwrap()) } /// Generate a commitment for a polynomial @@ -120,21 +121,6 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { Ok(Commitment(commitment)) } - /// Generate a commitment for a list of polynomials - fn multi_commit( - prover_param: impl Borrow, - polys: &[Self::Polynomial], - ) -> Result { - let prover_param = prover_param.borrow(); - let commit_time = start_timer!(|| format!("batch commit {} polynomials", polys.len())); - let res = parallelizable_slice_iter(polys) - .map(|poly| Self::commit(prover_param, poly)) - .collect::, PCSError>>()?; - - end_timer!(commit_time); - Ok(res) - } - /// On input a polynomial `p` and a point `point`, outputs a proof for the /// same. fn open( @@ -165,48 +151,6 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { Ok((Self::Proof { proof }, eval)) } - /// Input a list of polynomials, and a same number of points, - /// compute a multi-opening for all the polynomials. - // This is a naive approach - // TODO: to implement the more efficient batch opening algorithm - // (e.g., the appendix C.4 in https://eprint.iacr.org/2020/1536.pdf) - fn multi_open( - prover_param: impl Borrow, - _multi_commitment: &Self::BatchCommitment, - polynomials: &[Self::Polynomial], - points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError> { - let open_time = start_timer!(|| format!("batch opening {} polynomials", polynomials.len())); - if polynomials.len() != points.len() { - return Err(PCSError::InvalidParameters(format!( - "poly length {} is different from points length {}", - polynomials.len(), - points.len() - ))); - } - let mut batch_proof = vec![]; - let mut evals = vec![]; - for (poly, point) in polynomials.iter().zip(points.iter()) { - let (proof, eval) = Self::open(prover_param.borrow(), poly, point)?; - batch_proof.push(proof); - evals.push(eval); - } - - end_timer!(open_time); - Ok((batch_proof, evals)) - } - - /// Input a multilinear extension, and a number of points, and - /// a transcript, compute a multi-opening for all the polynomials. - fn multi_open_single_poly( - _prover_param: impl Borrow, - _commitment: &Self::Commitment, - _polynomials: &Self::Polynomial, - _points: &[Self::Point], - ) -> Result<(Self::BatchProof, Vec), PCSError> { - unimplemented!() - } - /// Verifies that `value` is the evaluation at `x` of the polynomial /// committed inside `comm`. fn verify( @@ -234,78 +178,6 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { end_timer!(check_time, || format!("Result: {}", res)); Ok(res) } - - /// Verifies that `value_i` is the evaluation at `x_i` of the polynomial - /// `poly_i` committed inside `comm`. 
- // This is a naive approach - // TODO: to implement the more efficient batch verification algorithm - // (e.g., the appendix C.4 in https://eprint.iacr.org/2020/1536.pdf) - fn batch_verify( - verifier_param: &Self::VerifierParam, - multi_commitment: &Self::BatchCommitment, - points: &[Self::Point], - values: &[E::Fr], - batch_proof: &Self::BatchProof, - rng: &mut R, - ) -> Result { - let check_time = - start_timer!(|| format!("Checking {} evaluation proofs", multi_commitment.len())); - - let mut total_c = ::zero(); - let mut total_w = ::zero(); - - let combination_time = start_timer!(|| "Combining commitments and proofs"); - let mut randomizer = E::Fr::one(); - // Instead of multiplying g and gamma_g in each turn, we simply accumulate - // their coefficients and perform a final multiplication at the end. - let mut g_multiplier = E::Fr::zero(); - for (((c, z), v), proof) in multi_commitment - .iter() - .zip(points) - .zip(values) - .zip(batch_proof) - { - let w = proof.proof; - let mut temp = w.mul(*z); - temp.add_assign_mixed(&c.0); - let c = temp; - g_multiplier += &(randomizer * v); - total_c += &c.mul(randomizer.into_repr()); - total_w += &w.mul(randomizer.into_repr()); - // We don't need to sample randomizers from the full field, - // only from 128-bit strings. - randomizer = u128::rand(rng).into(); - } - total_c -= &verifier_param.g.mul(g_multiplier); - end_timer!(combination_time); - - let to_affine_time = start_timer!(|| "Converting results to affine for pairing"); - let affine_points = E::G1Projective::batch_normalization_into_affine(&[-total_w, total_c]); - let (total_w, total_c) = (affine_points[0], affine_points[1]); - end_timer!(to_affine_time); - - let pairing_time = start_timer!(|| "Performing product of pairings"); - let result = E::product_of_pairings(&[ - (total_w.into(), verifier_param.beta_h.into()), - (total_c.into(), verifier_param.h.into()), - ]) - .is_one(); - end_timer!(pairing_time); - end_timer!(check_time, || format!("Result: {}", result)); - Ok(result) - } - - /// Verifies that `value_i` is the evaluation at `x_i` of the polynomial - /// `poly` committed inside `comm`. 
- fn batch_verify_single_poly( - _verifier_param: &Self::VerifierParam, - _commitment: &Self::Commitment, - _points: &[Self::Point], - _values: &[E::Fr], - _batch_proof: &Self::BatchProof, - ) -> Result { - unimplemented!() - } } fn skip_leading_zeros_and_convert_to_bigints>( @@ -385,43 +257,6 @@ mod tests { Ok(()) } - fn batch_check_test_template() -> Result<(), PCSError> - where - E: PairingEngine, - { - let rng = &mut test_rng(); - for _ in 0..10 { - let mut degree = 0; - while degree <= 1 { - degree = usize::rand(rng) % 20; - } - let pp = UnivariateKzgPCS::::gen_srs_for_testing(rng, degree)?; - let (ck, vk) = UnivariateKzgPCS::::trim(&pp, degree, None)?; - let mut comms = Vec::new(); - let mut values = Vec::new(); - let mut points = Vec::new(); - let mut proofs = Vec::new(); - for _ in 0..10 { - let p = as UVPolynomial>::rand(degree, rng); - let comm = UnivariateKzgPCS::::commit(&ck, &p)?; - let point = E::Fr::rand(rng); - let (proof, value) = UnivariateKzgPCS::::open(&ck, &p, &point)?; - - assert!(UnivariateKzgPCS::::verify( - &vk, &comm, &point, &value, &proof - )?); - comms.push(comm); - values.push(value); - points.push(point); - proofs.push(proof); - } - assert!(UnivariateKzgPCS::::batch_verify( - &vk, &comms, &points, &values, &proofs, rng - )?); - } - Ok(()) - } - #[test] fn end_to_end_test() { end_to_end_test_template::().expect("test failed for bls12-381"); @@ -431,8 +266,4 @@ mod tests { fn linear_polynomial_test() { linear_polynomial_test_template::().expect("test failed for bls12-381"); } - #[test] - fn batch_check_test() { - batch_check_test_template::().expect("test failed for bls12-381"); - } } diff --git a/pcs/src/univariate_kzg/srs.rs b/subroutines/src/pcs/univariate_kzg/srs.rs similarity index 98% rename from pcs/src/univariate_kzg/srs.rs rename to subroutines/src/pcs/univariate_kzg/srs.rs index 933e0c4..191c76d 100644 --- a/pcs/src/univariate_kzg/srs.rs +++ b/subroutines/src/pcs/univariate_kzg/srs.rs @@ -6,7 +6,7 @@ //! Implementing Structured Reference Strings for univariate polynomial KZG -use crate::{PCSError, StructuredReferenceString}; +use crate::pcs::{PCSError, StructuredReferenceString}; use ark_ec::{msm::FixedBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve}; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; diff --git a/poly-iop/src/errors.rs b/subroutines/src/poly_iop/errors.rs similarity index 97% rename from poly-iop/src/errors.rs rename to subroutines/src/poly_iop/errors.rs index eb5cfc0..dfddc30 100644 --- a/poly-iop/src/errors.rs +++ b/subroutines/src/poly_iop/errors.rs @@ -1,9 +1,9 @@ //! Error module. +use crate::pcs::prelude::PCSError; use arithmetic::ArithErrors; use ark_std::string::String; use displaydoc::Display; -use pcs::prelude::PCSError; use transcript::TranscriptError; /// A `enum` specifying the possible failure modes of the PolyIOP. diff --git a/poly-iop/src/lib.rs b/subroutines/src/poly_iop/mod.rs similarity index 100% rename from poly-iop/src/lib.rs rename to subroutines/src/poly_iop/mod.rs diff --git a/poly-iop/src/perm_check/mod.rs b/subroutines/src/poly_iop/perm_check/mod.rs similarity index 94% rename from poly-iop/src/perm_check/mod.rs rename to subroutines/src/poly_iop/perm_check/mod.rs index 1c05479..abbf109 100644 --- a/poly-iop/src/perm_check/mod.rs +++ b/subroutines/src/poly_iop/perm_check/mod.rs @@ -1,11 +1,13 @@ //! 
Main module for the Permutation Check protocol use self::util::computer_num_and_denom; -use crate::{errors::PolyIOPErrors, prelude::ProductCheck, PolyIOP}; +use crate::{ + pcs::PolynomialCommitmentScheme, + poly_iop::{errors::PolyIOPErrors, prelude::ProductCheck, PolyIOP}, +}; use ark_ec::PairingEngine; use ark_poly::DenseMultilinearExtension; use ark_std::{end_timer, start_timer}; -use pcs::PolynomialCommitmentScheme; use std::rc::Rc; use transcript::IOPTranscript; @@ -153,13 +155,15 @@ where #[cfg(test)] mod test { use super::PermutationCheck; - use crate::{errors::PolyIOPErrors, PolyIOP}; + use crate::{ + pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}, + poly_iop::{errors::PolyIOPErrors, PolyIOP}, + }; use arithmetic::{evaluate_opt, identity_permutation_mle, random_permutation_mle, VPAuxInfo}; use ark_bls12_381::Bls12_381; use ark_ec::PairingEngine; use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; use ark_std::test_rng; - use pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}; use std::{marker::PhantomData, rc::Rc}; type KZG = MultilinearKzgPCS; @@ -217,7 +221,7 @@ mod test { let mut rng = test_rng(); let srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, nv + 1)?; - let (pcs_param, _) = MultilinearKzgPCS::::trim(&srs, nv + 1, Some(nv + 1))?; + let (pcs_param, _) = MultilinearKzgPCS::::trim(&srs, None, Some(nv + 1))?; { // good path: w is a permutation of w itself under the identify map @@ -233,14 +237,10 @@ mod test { // s_perm is a random map let s_perm = random_permutation_mle(nv, &mut rng); - if nv == 1 { - test_permutation_check_helper::(&pcs_param, &w, &w, &s_perm)?; - } else { - assert!(test_permutation_check_helper::( - &pcs_param, &w, &w, &s_perm - ) - .is_err()); - } + assert!( + test_permutation_check_helper::(&pcs_param, &w, &w, &s_perm) + .is_err() + ); } { diff --git a/poly-iop/src/perm_check/util.rs b/subroutines/src/poly_iop/perm_check/util.rs similarity index 97% rename from poly-iop/src/perm_check/util.rs rename to subroutines/src/poly_iop/perm_check/util.rs index 2098174..cdf2f62 100644 --- a/poly-iop/src/perm_check/util.rs +++ b/subroutines/src/poly_iop/perm_check/util.rs @@ -1,6 +1,6 @@ //! This module implements useful functions for the permutation check protocol. -use crate::errors::PolyIOPErrors; +use crate::poly_iop::errors::PolyIOPErrors; use arithmetic::identity_permutation_mle; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; diff --git a/subroutines/src/poly_iop/prelude.rs b/subroutines/src/poly_iop/prelude.rs new file mode 100644 index 0000000..7c996e4 --- /dev/null +++ b/subroutines/src/poly_iop/prelude.rs @@ -0,0 +1,4 @@ +pub use crate::poly_iop::{ + errors::PolyIOPErrors, perm_check::PermutationCheck, prod_check::ProductCheck, + structs::IOPProof, sum_check::SumCheck, utils::*, zero_check::ZeroCheck, PolyIOP, +}; diff --git a/poly-iop/src/prod_check/mod.rs b/subroutines/src/poly_iop/prod_check/mod.rs similarity index 96% rename from poly-iop/src/prod_check/mod.rs rename to subroutines/src/poly_iop/prod_check/mod.rs index 73440c9..0e0b69f 100644 --- a/poly-iop/src/prod_check/mod.rs +++ b/subroutines/src/poly_iop/prod_check/mod.rs @@ -1,17 +1,19 @@ //! 
Main module for the Product Check protocol use crate::{ - errors::PolyIOPErrors, - prod_check::util::{compute_product_poly, prove_zero_check}, - zero_check::ZeroCheck, - PolyIOP, + pcs::PolynomialCommitmentScheme, + poly_iop::{ + errors::PolyIOPErrors, + prod_check::util::{compute_product_poly, prove_zero_check}, + zero_check::ZeroCheck, + PolyIOP, + }, }; use arithmetic::VPAuxInfo; use ark_ec::PairingEngine; use ark_ff::{One, PrimeField, Zero}; use ark_poly::DenseMultilinearExtension; use ark_std::{end_timer, start_timer}; -use pcs::PolynomialCommitmentScheme; use std::rc::Rc; use transcript::IOPTranscript; @@ -201,13 +203,15 @@ where #[cfg(test)] mod test { use super::ProductCheck; - use crate::{errors::PolyIOPErrors, PolyIOP}; + use crate::{ + pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}, + poly_iop::{errors::PolyIOPErrors, PolyIOP}, + }; use arithmetic::VPAuxInfo; use ark_bls12_381::{Bls12_381, Fr}; use ark_ec::PairingEngine; use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; use ark_std::test_rng; - use pcs::{prelude::MultilinearKzgPCS, PolynomialCommitmentScheme}; use std::{marker::PhantomData, rc::Rc}; // f and g are guaranteed to have the same product @@ -282,7 +286,7 @@ mod test { g.evaluations.reverse(); let srs = MultilinearKzgPCS::::gen_srs_for_testing(&mut rng, nv + 1)?; - let (pcs_param, _) = MultilinearKzgPCS::::trim(&srs, nv + 1, Some(nv + 1))?; + let (pcs_param, _) = MultilinearKzgPCS::::trim(&srs, None, Some(nv + 1))?; test_product_check_helper::>(&f, &g, &pcs_param)?; diff --git a/poly-iop/src/prod_check/util.rs b/subroutines/src/poly_iop/prod_check/util.rs similarity index 98% rename from poly-iop/src/prod_check/util.rs rename to subroutines/src/poly_iop/prod_check/util.rs index 5a67f5c..d21032a 100644 --- a/poly-iop/src/prod_check/util.rs +++ b/subroutines/src/poly_iop/prod_check/util.rs @@ -1,6 +1,6 @@ //! This module implements useful functions for the product check protocol. -use crate::{errors::PolyIOPErrors, structs::IOPProof, zero_check::ZeroCheck, PolyIOP}; +use crate::poly_iop::{errors::PolyIOPErrors, structs::IOPProof, zero_check::ZeroCheck, PolyIOP}; use arithmetic::{get_index, VirtualPolynomial}; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; diff --git a/poly-iop/readme.md b/subroutines/src/poly_iop/readme.md similarity index 100% rename from poly-iop/readme.md rename to subroutines/src/poly_iop/readme.md diff --git a/poly-iop/src/structs.rs b/subroutines/src/poly_iop/structs.rs similarity index 100% rename from poly-iop/src/structs.rs rename to subroutines/src/poly_iop/structs.rs diff --git a/poly-iop/src/sum_check/mod.rs b/subroutines/src/poly_iop/sum_check/mod.rs similarity index 99% rename from poly-iop/src/sum_check/mod.rs rename to subroutines/src/poly_iop/sum_check/mod.rs index db15d60..3f3ab93 100644 --- a/poly-iop/src/sum_check/mod.rs +++ b/subroutines/src/poly_iop/sum_check/mod.rs @@ -1,6 +1,6 @@ //! This module implements the sum check protocol. -use crate::{ +use crate::poly_iop::{ errors::PolyIOPErrors, structs::{IOPProof, IOPProverState, IOPVerifierState}, PolyIOP, diff --git a/poly-iop/src/sum_check/prover.rs b/subroutines/src/poly_iop/sum_check/prover.rs similarity index 64% rename from poly-iop/src/sum_check/prover.rs rename to subroutines/src/poly_iop/sum_check/prover.rs index d8793b1..a7d1e37 100644 --- a/poly-iop/src/sum_check/prover.rs +++ b/subroutines/src/poly_iop/sum_check/prover.rs @@ -1,7 +1,7 @@ //! Prover subroutines for a SumCheck protocol. 
use super::SumCheckProver; -use crate::{ +use crate::poly_iop::{ errors::PolyIOPErrors, structs::{IOPProverMessage, IOPProverState}, }; @@ -88,7 +88,6 @@ impl SumCheckProver for IOPProverState { flattened_ml_extensions .par_iter_mut() .for_each(|mle| *mle = fix_variables(mle, &[r])); - #[cfg(not(feature = "parallel"))] flattened_ml_extensions .iter_mut() @@ -103,8 +102,7 @@ impl SumCheckProver for IOPProverState { self.round += 1; let products_list = self.poly.products.clone(); - let mut products_sum = Vec::with_capacity(self.poly.aux_info.max_degree + 1); - products_sum.resize(self.poly.aux_info.max_degree + 1, F::zero()); + let mut products_sum = vec![F::zero(); self.poly.aux_info.max_degree + 1]; // let compute_sum = start_timer!(|| "compute sum"); @@ -112,30 +110,59 @@ impl SumCheckProver for IOPProverState { // f(r_1, ... r_m,, x_{m+1}... x_n) #[cfg(feature = "parallel")] - for (t, e) in products_sum.iter_mut().enumerate() { - let t = F::from(t as u64); - let one_minus_t = F::one() - t; - let products = (0..1 << (self.poly.aux_info.num_variables - self.round)) - .into_par_iter() - .map(|b| { - // evaluate P_round(t) - let mut tmp = F::zero(); - products_list.iter().for_each(|(coefficient, products)| { - let num_mles = products.len(); - let mut product = *coefficient; - for &f in products.iter().take(num_mles) { - let table = &flattened_ml_extensions[f]; // f's range is checked in init - product *= table[b << 1] * one_minus_t + table[(b << 1) + 1] * t; - } - tmp += product; - }); - - tmp - }) - .collect::>(); - - for i in products.iter() { - *e += i + { + let flag = (self.poly.aux_info.max_degree == 2) + && (products_list.len() == 1) + && (products_list[0].0 == F::one()); + if flag { + for (t, e) in products_sum.iter_mut().enumerate() { + let evals = (0..1 << (self.poly.aux_info.num_variables - self.round)) + .into_par_iter() + .map(|b| { + // evaluate P_round(t) + let table0 = &flattened_ml_extensions[products_list[0].1[0]]; + let table1 = &flattened_ml_extensions[products_list[0].1[1]]; + if t == 0 { + table0[b << 1] * table1[b << 1] + } else if t == 1 { + table0[(b << 1) + 1] * table1[(b << 1) + 1] + } else { + (table0[(b << 1) + 1] + table0[(b << 1) + 1] - table0[b << 1]) + * (table1[(b << 1) + 1] + table1[(b << 1) + 1] - table1[b << 1]) + } + }) + .collect::>(); + for val in evals.iter() { + *e += val + } + } + } else { + for (t, e) in products_sum.iter_mut().enumerate() { + let t = F::from(t as u64); + let products = (0..1 << (self.poly.aux_info.num_variables - self.round)) + .into_par_iter() + .map(|b| { + // evaluate P_round(t) + let mut tmp = F::zero(); + products_list.iter().for_each(|(coefficient, products)| { + let num_mles = products.len(); + let mut product = *coefficient; + for &f in products.iter().take(num_mles) { + let table = &flattened_ml_extensions[f]; // f's range is checked in init + product *= + table[b << 1] + (table[(b << 1) + 1] - table[b << 1]) * t; + } + tmp += product; + }); + + tmp + }) + .collect::>(); + + for i in products.iter() { + *e += i + } + } } } @@ -151,7 +178,7 @@ impl SumCheckProver for IOPProverState { let mut product = *coefficient; for &f in products.iter().take(num_mles) { let table = &flattened_ml_extensions[f]; // f's range is checked in init - product *= table[b << 1] * one_minus_t + table[(b << 1) + 1] * t; + product *= table[b << 1] + (table[(b << 1) + 1] - table[b << 1]) * t; } *e += product; } diff --git a/poly-iop/src/sum_check/verifier.rs b/subroutines/src/poly_iop/sum_check/verifier.rs similarity index 99% rename from 
poly-iop/src/sum_check/verifier.rs rename to subroutines/src/poly_iop/sum_check/verifier.rs index 4086dab..90edb48 100644 --- a/poly-iop/src/sum_check/verifier.rs +++ b/subroutines/src/poly_iop/sum_check/verifier.rs @@ -1,7 +1,7 @@ //! Verifier subroutines for a SumCheck protocol. use super::{SumCheckSubClaim, SumCheckVerifier}; -use crate::{ +use crate::poly_iop::{ errors::PolyIOPErrors, structs::{IOPProverMessage, IOPVerifierState}, }; @@ -311,7 +311,7 @@ fn u64_factorial(a: usize) -> u64 { #[cfg(test)] mod test { use super::interpolate_uni_poly; - use crate::errors::PolyIOPErrors; + use crate::poly_iop::errors::PolyIOPErrors; use ark_bls12_381::Fr; use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; use ark_std::{vec::Vec, UniformRand}; diff --git a/poly-iop/src/utils.rs b/subroutines/src/poly_iop/utils.rs similarity index 100% rename from poly-iop/src/utils.rs rename to subroutines/src/poly_iop/utils.rs diff --git a/poly-iop/src/zero_check/mod.rs b/subroutines/src/poly_iop/zero_check/mod.rs similarity index 98% rename from poly-iop/src/zero_check/mod.rs rename to subroutines/src/poly_iop/zero_check/mod.rs index 0538c86..12d3424 100644 --- a/poly-iop/src/zero_check/mod.rs +++ b/subroutines/src/poly_iop/zero_check/mod.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; -use crate::{errors::PolyIOPErrors, sum_check::SumCheck, PolyIOP}; +use crate::poly_iop::{errors::PolyIOPErrors, sum_check::SumCheck, PolyIOP}; use arithmetic::build_eq_x_r; use ark_ff::PrimeField; use ark_poly::MultilinearExtension; @@ -122,7 +122,7 @@ impl ZeroCheck for PolyIOP { mod test { use super::ZeroCheck; - use crate::{errors::PolyIOPErrors, PolyIOP}; + use crate::poly_iop::{errors::PolyIOPErrors, PolyIOP}; use arithmetic::VirtualPolynomial; use ark_bls12_381::Fr; use ark_std::test_rng;
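The rewritten sum-check prover earlier in this patch (subroutines/src/poly_iop/sum_check/prover.rs) evaluates each multilinear table at the round point t as A(0) + (A(1) - A(0)) * t, one multiplication per table instead of the two in the old (1 - t) * A(0) + t * A(1) form, and adds a fast path for the common degree-2, single-product, coefficient-one round that evaluates directly at t = 0, 1, 2, using 2 * A(1) - A(0) for t = 2. The following is a minimal sketch of that round evaluation over a toy prime field standing in for F; the names and test values are illustrative only.

// Toy round evaluation for a product of two multilinear tables, mod a small prime.
const P: u64 = 101;

fn add(a: u64, b: u64) -> u64 { (a + b) % P }
fn sub(a: u64, b: u64) -> u64 { (a + P - b) % P }
fn mul(a: u64, b: u64) -> u64 { a * b % P }

/// Evaluate the round polynomial sum_b f(t, b) * g(t, b) at t = 0, 1, 2, where
/// entries (b << 1) and (b << 1) + 1 of each table hold the values at t = 0 and t = 1.
fn round_evals(f: &[u64], g: &[u64]) -> [u64; 3] {
    let mut out = [0u64; 3];
    for b in 0..f.len() / 2 {
        let (f0, f1) = (f[b << 1], f[(b << 1) + 1]);
        let (g0, g1) = (g[b << 1], g[(b << 1) + 1]);
        out[0] = add(out[0], mul(f0, g0)); // t = 0
        out[1] = add(out[1], mul(f1, g1)); // t = 1
        let f2 = sub(add(f1, f1), f0);     // f(2, b) = 2*f1 - f0
        let g2 = sub(add(g1, g1), g0);
        out[2] = add(out[2], mul(f2, g2)); // t = 2
    }
    out
}

fn main() {
    let f: [u64; 4] = [3, 7, 5, 2];
    let g: [u64; 4] = [4, 9, 8, 6];
    let evals = round_evals(&f, &g);
    // Cross-check against the generic A(0) + (A(1) - A(0)) * t form used in the patch.
    for t in 0..3u64 {
        let mut s = 0u64;
        for b in 0..2usize {
            let ft = add(f[b << 1], mul(sub(f[(b << 1) + 1], f[b << 1]), t));
            let gt = add(g[b << 1], mul(sub(g[(b << 1) + 1], g[b << 1]), t));
            s = add(s, mul(ft, gt));
        }
        assert_eq!(s, evals[t as usize]);
    }
}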