fix: organize padding

Daniel Tehrani
2023-07-31 14:16:13 -07:00
parent 3546f03844
commit abd355d870
13 changed files with 185 additions and 130 deletions

View File

@@ -1,17 +1,16 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use tensor_pcs::{
rs_config, FieldExt, SparseMLPoly, TensorMultilinearPCS, TensorRSMultilinearPCSConfig,
Transcript,
rs_config, FieldExt, MlPoly, TensorMultilinearPCS, TensorRSMultilinearPCSConfig, Transcript,
};
fn poly<F: FieldExt>(num_vars: usize) -> SparseMLPoly<F> {
fn poly<F: FieldExt>(num_vars: usize) -> MlPoly<F> {
let num_entries: usize = 2usize.pow(num_vars as u32);
let evals = (0..num_entries)
.map(|i| (i, F::from(i as u64)))
.collect::<Vec<(usize, F)>>();
.map(|i| F::from(i as u64))
.collect::<Vec<F>>();
let ml_poly = SparseMLPoly::new(evals, num_vars);
let ml_poly = MlPoly::new(evals);
ml_poly
}
@@ -32,10 +31,13 @@ fn pcs_fft_bench(c: &mut Criterion) {
let num_vars = 13;
let ml_poly = poly(num_vars);
let ml_poly_evals = ml_poly.evals.clone();
let open_at = (0..ml_poly.num_vars)
.map(|i| F::from(i as u64))
.collect::<Vec<F>>();
let y = ml_poly.eval(&open_at);
let mut config = config_base();
config.fft_domain = Some(rs_config::smooth::gen_config::<F>(
config.num_cols(ml_poly.evals.len()),
@@ -47,8 +49,8 @@ fn pcs_fft_bench(c: &mut Criterion) {
let pcs = TensorMultilinearPCS::<F>::new(config.clone());
let mut transcript = Transcript::new(b"bench");
let comm = pcs.commit(&black_box(ml_poly.clone()));
pcs.open(&comm, &ml_poly, &open_at, &mut transcript);
let comm = pcs.commit(black_box(&ml_poly_evals));
pcs.open(&comm, &ml_poly_evals, &open_at, y, &mut transcript);
})
});
}
@@ -58,10 +60,13 @@ fn pcs_ecfft_bench(c: &mut Criterion) {
let num_vars = 13;
let ml_poly = poly(num_vars);
let ml_poly_evals = ml_poly.evals.clone();
let open_at = (0..ml_poly.num_vars)
.map(|i| F::from(i as u64))
.collect::<Vec<F>>();
let y = ml_poly.eval(&open_at);
let mut config = config_base();
config.ecfft_config = Some(rs_config::ecfft::gen_config::<F>(
config.num_cols(ml_poly.evals.len()),
@@ -73,8 +78,8 @@ fn pcs_ecfft_bench(c: &mut Criterion) {
let pcs = TensorMultilinearPCS::<F>::new(config.clone());
let mut transcript = Transcript::new(b"bench");
let comm = pcs.commit(&black_box(ml_poly.clone()));
pcs.open(&comm, &ml_poly, &open_at, &mut transcript);
let comm = pcs.commit(black_box(&ml_poly_evals));
pcs.open(&comm, &ml_poly_evals, &open_at, y, &mut transcript);
})
});
}
@@ -86,6 +91,6 @@ fn set_duration() -> Criterion {
criterion_group! {
name = benches;
config = set_duration();
targets = pcs_ecfft_bench
targets = pcs_ecfft_bench, pcs_fft_bench
}
criterion_main!(benches);

View File

@@ -16,7 +16,7 @@ impl FieldExt for halo2curves::pasta::Fp {}
pub use ecfft;
pub use polynomial::eq_poly::EqPoly;
pub use polynomial::sparse_ml_poly::SparseMLPoly;
pub use polynomial::ml_poly::MlPoly;
pub use tensor_rs_pcs::{TensorMLOpening, TensorMultilinearPCS, TensorRSMultilinearPCSConfig};
pub use transcript::{AppendToTranscript, Transcript};
pub use utils::{det_num_cols, det_num_rows};
pub use utils::{det_num_cols, det_num_rows, dot_prod};
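Downstream users now import MlPoly (and, if needed, dot_prod) from the crate root instead of SparseMLPoly; a minimal illustrative import, mirroring the benchmark above (the exact grouping is an assumption, only the exported names come from this diff):
// Before: use tensor_pcs::{FieldExt, SparseMLPoly, TensorMultilinearPCS, Transcript};
use tensor_pcs::{dot_prod, FieldExt, MlPoly, TensorMultilinearPCS, Transcript};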

View File

@@ -0,0 +1,74 @@
use crate::polynomial::eq_poly::EqPoly;
use crate::FieldExt;
#[derive(Clone, Debug)]
pub struct MlPoly<F> {
pub evals: Vec<F>,
pub num_vars: usize,
}
impl<F: FieldExt> MlPoly<F> {
#[allow(dead_code)]
pub fn new(evals: Vec<F>) -> Self {
assert!(evals.len().is_power_of_two());
let num_vars = (evals.len() as f64).log2() as usize;
Self { evals, num_vars }
}
#[allow(dead_code)]
fn dot_prod(x: &[F], y: &[F]) -> F {
assert_eq!(x.len(), y.len());
let mut result = F::ZERO;
for i in 0..x.len() {
result += x[i] * y[i];
}
result
}
// Evaluate the multilinear extension of this polynomial at the point `t`.
// The polynomial is given in evaluation form over the boolean hypercube.
// `t` should be in big-endian form.
#[allow(dead_code)]
pub fn eval(&self, t: &[F]) -> F {
let n = self.evals.len();
debug_assert_eq!((n as f64).log2() as usize, t.len());
let eq_evals = EqPoly::new(t.to_vec()).evals();
Self::dot_prod(&self.evals, &eq_evals)
}
}
#[cfg(test)]
mod tests {
use super::*;
type F = halo2curves::secp256k1::Fp;
use halo2curves::ff::Field;
#[test]
fn test_ml_poly_eval() {
let num_vars = 4;
let num_evals = 2usize.pow(num_vars as u32);
let evals = (0..num_evals)
.map(|x| F::from(x as u64))
.collect::<Vec<F>>();
let ml_poly = MlPoly::new(evals.clone());
let eval_last = ml_poly.eval(&[F::ONE, F::ONE, F::ONE, F::ONE]);
assert_eq!(
eval_last,
evals[evals.len() - 1],
"The last evaluation is not correct"
);
let eval_first = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ZERO]);
assert_eq!(eval_first, evals[0], "The first evaluation is not correct");
let eval_second = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ONE]);
assert_eq!(
eval_second, evals[1],
"The second evaluation is not correct"
);
}
}
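For orientation, a minimal usage sketch of the new dense representation (illustrative only, not part of the commit; it assumes the same field type and `Field` import as the unit tests above, and the crate-root re-export of `MlPoly`):
use halo2curves::ff::Field;
use tensor_pcs::MlPoly;
type F = halo2curves::secp256k1::Fp;
fn ml_poly_usage_sketch() {
    // Four evaluations over {0,1}^2; `new` derives num_vars = log2(4) = 2.
    let evals = vec![F::from(7u64), F::from(3u64), F::from(5u64), F::from(2u64)];
    let poly = MlPoly::new(evals);
    assert_eq!(poly.num_vars, 2);
    // The point is big-endian, so (1, 0) selects index 0b10 = 2 of the evaluation table.
    assert_eq!(poly.eval(&[F::ONE, F::ZERO]), F::from(5u64));
}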

View File

@@ -1,2 +1,2 @@
pub mod eq_poly;
pub mod sparse_ml_poly;
pub mod ml_poly;

View File

@@ -1,83 +0,0 @@
use crate::{EqPoly, FieldExt};
#[derive(Clone, Debug)]
pub struct SparseMLPoly<F> {
pub evals: Vec<(usize, F)>,
pub num_vars: usize,
}
impl<F: FieldExt> SparseMLPoly<F> {
pub fn new(evals: Vec<(usize, F)>, num_vars: usize) -> Self {
Self { evals, num_vars }
}
pub fn num_entries(&self) -> usize {
2usize.pow(self.num_vars as u32)
}
pub fn from_dense(dense_evals: Vec<F>) -> Self {
let sparse_evals = dense_evals
.iter()
.enumerate()
.filter(|(_, eval)| **eval != F::ZERO)
.map(|(i, eval)| (i, *eval))
.collect::<Vec<(usize, F)>>();
let num_vars = (dense_evals.len() as f64).log2() as usize;
Self {
evals: sparse_evals,
num_vars,
}
}
// `t` should be in big-endian form.
pub fn eval(&self, t: &[F]) -> F {
assert_eq!(self.num_vars, t.len());
// Evaluate the multilinear extension of the polynomial `a`,
// over the boolean hypercube
let eq_poly = EqPoly::new(t.to_vec());
let eq_evals = eq_poly.evals();
let mut result = F::ZERO;
for eval in &self.evals {
result += eq_evals[eval.0] * eval.1;
}
result
}
}
#[cfg(test)]
mod tests {
use super::*;
type F = halo2curves::secp256k1::Fp;
use halo2curves::ff::Field;
#[test]
fn test_sparse_ml_poly_eval() {
let num_vars = 4;
let num_evals = 2usize.pow(num_vars as u32);
let evals = (0..num_evals)
.map(|x| F::from((x as u64) as u64))
.collect::<Vec<F>>();
let ml_poly = SparseMLPoly::from_dense(evals.clone());
let eval_last = ml_poly.eval(&[F::ONE, F::ONE, F::ONE, F::ONE]);
assert_eq!(
eval_last,
evals[evals.len() - 1],
"The last evaluation is not correct"
);
let eval_first = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ZERO]);
assert_eq!(eval_first, evals[0], "The first evaluation is not correct");
let eval_second = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ONE]);
assert_eq!(
eval_second, evals[1],
"The second evaluation is not correct"
);
}
}

View File

@@ -6,7 +6,6 @@ use serde::{Deserialize, Serialize};
use crate::fft::fft;
use crate::polynomial::eq_poly::EqPoly;
use crate::polynomial::sparse_ml_poly::SparseMLPoly;
use crate::tensor_code::TensorCode;
use crate::transcript::Transcript;
use crate::utils::{det_num_cols, det_num_rows, dot_prod, hash_all, rlc_rows, sample_indices};
@@ -57,32 +56,32 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
Self { config }
}
pub fn commit(&self, poly: &SparseMLPoly<F>) -> CommittedTensorCode<F> {
pub fn commit(&self, ml_poly_evals: &[F]) -> CommittedTensorCode<F> {
// Merkle commit to the evaluations of the polynomial
let tensor_code = self.encode_zk(poly);
let tree = tensor_code.commit(
self.config.num_cols(poly.num_entries()),
self.config.num_rows(poly.num_entries()),
);
let n = ml_poly_evals.len();
assert!(n.is_power_of_two());
let tensor_code = self.encode_zk(ml_poly_evals);
let tree = tensor_code.commit(self.config.num_cols(n), self.config.num_rows(n));
tree
}
pub fn open(
&self,
u_hat_comm: &CommittedTensorCode<F>,
poly: &SparseMLPoly<F>,
// TODO: Remove the evaluations argument and use u_hat_comm instead
ml_poly_evals: &[F],
point: &[F],
eval: F,
transcript: &mut Transcript<F>,
) -> TensorMLOpening<F> {
let num_cols = self.config.num_cols(poly.num_entries());
let num_rows = self.config.num_rows(poly.num_entries());
debug_assert_eq!(poly.num_vars, point.len());
let n = ml_poly_evals.len();
assert!(n.is_power_of_two());
let num_vars = (n as f64).log2() as usize;
let mut padded_evals = poly.evals.clone();
padded_evals.resize(
num_cols * num_rows,
(2usize.pow(poly.num_vars as u32), F::ZERO),
);
let num_cols = self.config.num_cols(n);
let num_rows = self.config.num_rows(n);
debug_assert_eq!(num_vars, point.len());
// ########################################
// Testing phase
@@ -94,12 +93,7 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
let r_u = transcript.challenge_vec(num_rows);
let u = (0..num_rows)
.map(|i| {
padded_evals[(i * num_cols)..((i + 1) * num_cols)]
.iter()
.map(|entry| entry.1)
.collect::<Vec<F>>()
})
.map(|i| ml_poly_evals[(i * num_cols)..((i + 1) * num_cols)].to_vec())
.collect::<Vec<Vec<F>>>();
// Random linear combination of the rows of the polynomial in a tensor structure
@@ -140,7 +134,7 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
TensorMLOpening {
x: point.to_vec(),
y: poly.eval(&point),
y: eval,
eval_query_leaves: eval_queries,
test_query_leaves: test_queries,
u_hat_comm: u_hat_comm.committed_tree.root(),
@@ -151,7 +145,7 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
base_opening: BaseOpening {
hashes: u_hat_comm.committed_tree.column_roots.clone(),
},
poly_num_vars: poly.num_vars,
poly_num_vars: num_vars,
}
}
}
@@ -341,25 +335,16 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
u_hat_openings
}
fn encode_zk(&self, poly: &SparseMLPoly<F>) -> TensorCode<F> {
let num_rows = self.config.num_rows(poly.num_entries());
let num_cols = self.config.num_cols(poly.num_entries());
fn encode_zk(&self, ml_poly_evals: &[F]) -> TensorCode<F> {
let n = ml_poly_evals.len();
assert!(n.is_power_of_two());
// Pad the sparse evaluations with zeros
let mut evals = poly.evals.clone();
evals.resize(
num_cols * num_rows,
(2usize.pow(poly.num_vars as u32), F::ZERO),
);
debug_assert_eq!(evals.len(), num_cols * num_rows);
let num_rows = self.config.num_rows(n);
let num_cols = self.config.num_cols(n);
debug_assert_eq!(n, num_cols * num_rows);
let codewords = (0..num_rows)
.map(|i| {
evals[i * num_cols..(i + 1) * num_cols]
.iter()
.map(|entry| entry.1)
.collect::<Vec<F>>()
})
.map(|i| &ml_poly_evals[i * num_cols..(i + 1) * num_cols])
.map(|row| self.split_encode(&row))
.collect::<Vec<Vec<F>>>();
@@ -369,42 +354,44 @@ impl<F: FieldExt> TensorMultilinearPCS<F> {
#[cfg(test)]
mod tests {
use ::ecfft::find_coset_offset;
use super::*;
use crate::polynomial::ml_poly::MlPoly;
use crate::rs_config::{ecfft, good_curves::secp256k1::secp256k1_good_curve, naive, smooth};
const TEST_NUM_VARS: usize = 8;
const TEST_L: usize = 10;
fn test_poly<F: FieldExt>() -> SparseMLPoly<F> {
fn test_poly_evals<F: FieldExt>() -> MlPoly<F> {
let num_entries: usize = 2usize.pow(TEST_NUM_VARS as u32);
let evals = (0..num_entries)
.map(|i| (i, F::from(i as u64)))
.collect::<Vec<(usize, F)>>();
.map(|i| F::from((i + 1) as u64))
.collect::<Vec<F>>();
let ml_poly = SparseMLPoly::new(evals, TEST_NUM_VARS);
ml_poly
MlPoly::new(evals)
}
fn prove_and_verify<F: FieldExt>(ml_poly: SparseMLPoly<F>, pcs: TensorMultilinearPCS<F>) {
let comm = pcs.commit(&ml_poly);
fn prove_and_verify<F: FieldExt>(ml_poly: &MlPoly<F>, pcs: TensorMultilinearPCS<F>) {
let ml_poly_evals = &ml_poly.evals;
let open_at = (0..ml_poly.num_vars)
let comm = pcs.commit(ml_poly_evals);
let ml_poly_num_vars = (ml_poly_evals.len() as f64).log2() as usize;
let open_at = (0..ml_poly_num_vars)
.map(|i| F::from(i as u64))
.collect::<Vec<F>>();
let y = ml_poly.eval(&open_at);
let mut prover_transcript = Transcript::<F>::new(b"test");
prover_transcript.append_bytes(&comm.committed_tree.root);
let opening = pcs.open(&comm, &ml_poly, &open_at, &mut prover_transcript);
let opening = pcs.open(&comm, ml_poly_evals, &open_at, y, &mut prover_transcript);
let mut verifier_transcript = Transcript::<F>::new(b"test");
verifier_transcript.append_bytes(&comm.committed_tree.root);
pcs.verify(&opening, &mut verifier_transcript);
}
fn config_base<F: FieldExt>(ml_poly: &SparseMLPoly<F>) -> TensorRSMultilinearPCSConfig<F> {
fn config_base<F: FieldExt>() -> TensorRSMultilinearPCSConfig<F> {
let expansion_factor = 2;
TensorRSMultilinearPCSConfig::<F> {
@@ -420,23 +407,27 @@ mod tests {
fn test_tensor_pcs_fft() {
type F = halo2curves::pasta::Fp;
// FFT config
let ml_poly = test_poly();
let mut config = config_base(&ml_poly);
config.fft_domain = Some(smooth::gen_config(config.num_cols(ml_poly.num_entries())));
let ml_poly = test_poly_evals();
let mut config = config_base();
// The test polynomial has 2^k non-zero entries
let num_entries = ml_poly.evals.len();
config.fft_domain = Some(smooth::gen_config(config.num_cols(num_entries)));
// Test FFT PCS
let tensor_pcs_fft = TensorMultilinearPCS::<F>::new(config);
prove_and_verify(ml_poly, tensor_pcs_fft);
prove_and_verify(&ml_poly, tensor_pcs_fft);
}
#[test]
fn test_tensor_pcs_ecfft() {
type F = halo2curves::secp256k1::Fp;
let ml_poly = test_poly();
let ml_poly = test_poly_evals();
let mut config = config_base(&ml_poly);
let mut config = config_base();
let num_cols = config.num_cols(ml_poly.num_entries());
let n = ml_poly.evals.len();
let num_cols = config.num_cols(n);
let k = ((num_cols * config.expansion_factor).next_power_of_two() as f64).log2() as usize;
let (curve, coset_offset) = secp256k1_good_curve(k);
@@ -444,21 +435,22 @@ mod tests {
// Test FFT PCS
let tensor_pcs_ecf = TensorMultilinearPCS::<F>::new(config);
prove_and_verify(ml_poly, tensor_pcs_ecf);
prove_and_verify(&ml_poly, tensor_pcs_ecf);
}
#[test]
fn test_tensor_pcs_naive() {
type F = halo2curves::secp256k1::Fp;
// FFT config
let ml_poly = test_poly();
let ml_poly = test_poly_evals();
let n = ml_poly.evals.len();
// Naive config
let mut config = config_base(&ml_poly);
config.domain_powers = Some(naive::gen_config(config.num_cols(ml_poly.num_entries())));
let mut config = config_base();
config.domain_powers = Some(naive::gen_config(config.num_cols(n)));
// Test FFT PCS
let tensor_pcs_naive = TensorMultilinearPCS::<F>::new(config);
prove_and_verify(ml_poly, tensor_pcs_naive);
prove_and_verify(&ml_poly, tensor_pcs_naive);
}
}
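Taken together, the new prover-side flow looks as follows; this is a hedged sketch that mirrors `prove_and_verify` above, assumes a `TensorMultilinearPCS` already configured as in the tests, and uses only the crate-root re-exports (`MlPoly`, `TensorMultilinearPCS`, `Transcript`, `FieldExt`):
fn open_and_verify_sketch<F: FieldExt>(pcs: &TensorMultilinearPCS<F>, ml_poly: &MlPoly<F>) {
    // `commit` and `open` now operate on the dense evaluation slice directly;
    // its length must already be a power of two (no internal sparse padding).
    let evals = &ml_poly.evals;
    let comm = pcs.commit(evals);
    // The caller computes the claimed evaluation and passes it to `open`.
    let point = (0..ml_poly.num_vars)
        .map(|i| F::from(i as u64))
        .collect::<Vec<F>>();
    let y = ml_poly.eval(&point);
    let mut prover_transcript = Transcript::<F>::new(b"sketch");
    prover_transcript.append_bytes(&comm.committed_tree.root);
    let opening = pcs.open(&comm, evals, &point, y, &mut prover_transcript);
    let mut verifier_transcript = Transcript::<F>::new(b"sketch");
    verifier_transcript.append_bytes(&comm.committed_tree.root);
    pcs.verify(&opening, &mut verifier_transcript);
}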

View File

@@ -76,6 +76,7 @@ pub fn sample_indices<F: FieldExt>(
}
pub fn det_num_cols(num_entries: usize, l: usize) -> usize {
assert!(num_entries.is_power_of_two());
let num_entries_sqrt = (num_entries as f64).sqrt() as usize;
// The number of columns must be a power of two
// to tensor-query the polynomial evaluation
@@ -84,6 +85,7 @@ pub fn det_num_cols(num_entries: usize, l: usize) -> usize {
}
pub fn det_num_rows(num_entries: usize, l: usize) -> usize {
assert!(num_entries.is_power_of_two());
// The number of rows must be a power of two
// to tensor-query the polynomial evaluation
let num_rows = (num_entries / det_num_cols(num_entries, l)).next_power_of_two();