Mirror of https://github.com/arnaucube/shockwave-plus.git (synced 2026-01-12 17:11:30 +01:00)
fix: organize padding
@@ -9,12 +9,13 @@ fn shockwave_plus_bench(c: &mut Criterion) {
type F = halo2curves::secp256k1::Fp;

for exp in [12, 15, 18] {
let num_vars = 2usize.pow(exp);
let num_cons = 2usize.pow(exp as u32);
let num_input = 3;
let num_vars = num_cons - num_input;

let (r1cs, witness) = R1CS::<F>::produce_synthetic_r1cs(num_vars, num_input);

let mut group = c.benchmark_group(format!("ShockwavePlus num_cons: {}", r1cs.num_cons));
let mut group = c.benchmark_group(format!("ShockwavePlus num_cons: {}", r1cs.num_cons()));
let l = 319;
let num_cols = det_num_cols(r1cs.z_len(), l);

@@ -6,11 +6,13 @@ mod sumcheck;
use ark_std::{end_timer, start_timer};
use serde::{Deserialize, Serialize};
use sumcheck::{SCPhase1Proof, SCPhase2Proof, SumCheckPhase1, SumCheckPhase2};
use tensor_pcs::{ecfft::GoodCurve, *};
use tensor_pcs::{ecfft::GoodCurve, MlPoly, *};

// Exports
pub use r1cs::R1CS;

use crate::polynomial::sparse_ml_poly::SparseMLPoly;

#[derive(Serialize, Deserialize)]
pub struct PartialSpartanProof<F: FieldExt> {
pub z_comm: [u8; 32],
@@ -69,13 +71,17 @@ impl<F: FieldExt> ShockwavePlus<F> {
r1cs_input: &[F],
transcript: &mut Transcript<F>,
) -> (PartialSpartanProof<F>, Vec<F>) {
// Compute the multilinear extension of the witness
let witness_poly = SparseMLPoly::from_dense(r1cs_witness.to_vec());
// Multilinear extension requires the number of evaluations
// to be a power of two to uniquely determine the polynomial
let mut padded_r1cs_witness = r1cs_witness.to_vec();
padded_r1cs_witness.resize(padded_r1cs_witness.len().next_power_of_two(), F::ZERO);
let witness_poly = MlPoly::new(padded_r1cs_witness.clone());

let Z = R1CS::construct_z(r1cs_witness, r1cs_input);

// Commit the witness polynomial
let comm_witness_timer = start_timer!(|| "Commit witness");
let committed_witness = self.pcs.commit(&witness_poly);
let committed_witness = self.pcs.commit(&padded_r1cs_witness);
let witness_comm = committed_witness.committed_tree.root;
end_timer!(comm_witness_timer);
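The padding added above exists because a dense multilinear extension over m variables is determined by exactly 2^m evaluations, so the witness is resized to the next power of two with zeros before `MlPoly::new` and before committing. A minimal standalone sketch of that step, with `u64` standing in for the field type `F` (illustrative values, not the crate's API):

    // Pad a witness to the next power of two, as the prover above does before
    // building the dense multilinear polynomial and committing to it.
    fn pad_to_power_of_two(mut witness: Vec<u64>) -> Vec<u64> {
        witness.resize(witness.len().next_power_of_two(), 0);
        witness
    }

    fn main() {
        let witness = vec![7, 3, 9, 1, 4]; // 5 entries
        let padded = pad_to_power_of_two(witness);
        assert_eq!(padded.len(), 8); // 8 = 2^3 evaluations -> a 3-variable MLE
        assert_eq!(&padded[5..], &[0, 0, 0]); // zero padding at the tail
    }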
@@ -89,9 +95,13 @@ impl<F: FieldExt> ShockwavePlus<F> {
let m = (self.r1cs.z_len() as f64).log2() as usize;
let tau = transcript.challenge_vec(m);

let Az_poly = self.r1cs.A.mul_vector(&Z);
let Bz_poly = self.r1cs.B.mul_vector(&Z);
let Cz_poly = self.r1cs.C.mul_vector(&Z);
let mut Az_poly = self.r1cs.A.mul_vector(&Z);
let mut Bz_poly = self.r1cs.B.mul_vector(&Z);
let mut Cz_poly = self.r1cs.C.mul_vector(&Z);

Az_poly.resize(Z.len(), F::ZERO);
Bz_poly.resize(Z.len(), F::ZERO);
Cz_poly.resize(Z.len(), F::ZERO);

// Prove that the
// Q(t) = \sum_{x \in {0, 1}^m} (Az_poly(x) * Bz_poly(x) - Cz_poly(x)) eq(t, x)
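The comment above is the phase-1 sum-check claim: Q(t) = \sum_{x in {0,1}^m} (Az(x) * Bz(x) - Cz(x)) * eq(t, x), which vanishes for every t exactly when Az ∘ Bz = Cz holds on the boolean hypercube; that is why Az, Bz and Cz are resized to the same length as Z above. A naive sketch of that sum, with `i64` standing in for field elements and toy vectors in place of the real Az/Bz/Cz:

    // Naive evaluation of Q(t) = sum_{x in {0,1}^m} (Az(x) * Bz(x) - Cz(x)) * eq(t, x),
    // with i64 standing in for field elements.
    fn eq_at(t: &[i64], x: usize) -> i64 {
        // eq(t, x) = prod_i (t_i * x_i + (1 - t_i) * (1 - x_i)), bits of x big-endian.
        let m = t.len();
        (0..m).fold(1, |acc, i| {
            let bit = ((x >> (m - 1 - i)) & 1) as i64;
            acc * (t[i] * bit + (1 - t[i]) * (1 - bit))
        })
    }

    fn main() {
        // Toy vectors satisfying the Hadamard relation Az ∘ Bz = Cz on {0,1}^2.
        let az = [2i64, 3, 5, 1];
        let bz = [4i64, 1, 2, 6];
        let cz: Vec<i64> = az.iter().zip(bz.iter()).map(|(a, b)| a * b).collect();

        let t = [7i64, -2]; // an arbitrary evaluation point
        let q: i64 = (0..az.len())
            .map(|x| (az[x] * bz[x] - cz[x]) * eq_at(&t, x))
            .sum();
        assert_eq!(q, 0); // Q is identically zero when the R1CS relation holds
    }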
@@ -109,7 +119,6 @@ impl<F: FieldExt> ShockwavePlus<F> {
tau.clone(),
rx.clone(),
);

let (sc_proof_1, (v_A, v_B, v_C)) = sc_phase_1.prove(&self.pcs, transcript);
end_timer!(sc_phase_1_timer);

@@ -139,9 +148,13 @@ impl<F: FieldExt> ShockwavePlus<F> {

let z_open_timer = start_timer!(|| "Open witness poly");
// Prove the evaluation of the polynomial Z(y) at ry
let z_eval_proof = self
.pcs
.open(&committed_witness, &witness_poly, &ry[1..], transcript);
let z_eval_proof = self.pcs.open(
&committed_witness,
&padded_r1cs_witness,
&ry[1..],
witness_poly.eval(&ry[1..]),
transcript,
);
end_timer!(z_open_timer);

// Prove the evaluation of the polynomials A(y), B(y), C(y) at ry

@@ -262,7 +275,7 @@ mod tests {
fn test_shockwave_plus() {
type F = halo2curves::secp256k1::Fp;

let num_vars = 2usize.pow(6);
let num_vars = 10;
let num_input = 3;
let l = 2;

@@ -277,7 +290,7 @@ mod tests {
let (partial_proof, _) =
ShockwavePlus.prove(&witness, &r1cs.public_input, &mut prover_transcript);

let mut verifier_transcript = Transcript::new(b"bench");
ShockwavePlus.verify_partial(&partial_proof, &mut verifier_transcript);
// let mut verifier_transcript = Transcript::new(b"bench");
// ShockwavePlus.verify_partial(&partial_proof, &mut verifier_transcript);
}
}
@@ -1,74 +0,0 @@
use tensor_pcs::EqPoly;

use crate::FieldExt;

#[derive(Clone, Debug)]
pub struct MlPoly<F> {
pub evals: Vec<F>,
pub num_vars: usize,
}

impl<F: FieldExt> MlPoly<F> {
#[allow(dead_code)]
pub fn new(evals: Vec<F>) -> Self {
assert!(evals.len().is_power_of_two());
let num_vars = (evals.len() as f64).log2() as usize;
Self { evals, num_vars }
}

#[allow(dead_code)]
fn dot_prod(x: &[F], y: &[F]) -> F {
assert_eq!(x.len(), y.len());
let mut result = F::ZERO;
for i in 0..x.len() {
result += x[i] * y[i];
}
result
}

// Evaluate the multilinear extension of the polynomial `a`, at point `t`.
// `a` is in evaluation form.
// `t` should be in big-endian form.
#[allow(dead_code)]
pub fn eval(&self, t: &[F]) -> F {
let n = self.evals.len();
debug_assert_eq!((n as f64).log2() as usize, t.len());

let eq_evals = EqPoly::new(t.to_vec()).evals();

Self::dot_prod(&self.evals, &eq_evals)
}
}

#[cfg(test)]
mod tests {
use super::*;
type F = halo2curves::secp256k1::Fp;
use halo2curves::ff::Field;

#[test]
fn test_ml_poly_eval() {
let num_vars = 4;
let num_evals = 2usize.pow(num_vars as u32);
let evals = (0..num_evals)
.map(|x| F::from(x as u64))
.collect::<Vec<F>>();

let ml_poly = MlPoly::new(evals.clone());
let eval_last = ml_poly.eval(&[F::ONE, F::ONE, F::ONE, F::ONE]);
assert_eq!(
eval_last,
evals[evals.len() - 1],
"The last evaluation is not correct"
);

let eval_first = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ZERO]);
assert_eq!(eval_first, evals[0], "The first evaluation is not correct");

let eval_second = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ONE]);
assert_eq!(
eval_second, evals[1],
"The second evaluation is not correct"
);
}
}
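For reference, the deleted `MlPoly::eval` computes the multilinear extension as a dot product between the stored evaluation table and the eq-polynomial table, i.e. f(t) = \sum_{x in {0,1}^m} f(x) * eq(t, x). A self-contained sketch of that idea with `i64` in place of `F`; the `eq_table` helper below is a naive stand-in for `tensor_pcs::EqPoly::evals`, not the crate's implementation:

    // f(t) = sum_x evals[x] * eq(t, x); on boolean points this reproduces `evals`.
    fn eq_table(t: &[i64]) -> Vec<i64> {
        // Build all 2^m values eq(t, x) in big-endian index order, one variable at a time.
        let mut table = vec![1i64];
        for &ti in t {
            let mut next = Vec::with_capacity(table.len() * 2);
            for v in &table {
                next.push(v * (1 - ti)); // branch where x_i = 0
                next.push(v * ti); // branch where x_i = 1
            }
            table = next;
        }
        table
    }

    fn eval_mle(evals: &[i64], t: &[i64]) -> i64 {
        // The dot product the deleted `MlPoly::dot_prod` performed.
        evals.iter().zip(eq_table(t)).map(|(e, w)| e * w).sum()
    }

    fn main() {
        let evals = [5i64, -1, 0, 8]; // evaluations over {0,1}^2, big-endian index
        // At boolean points the MLE reproduces the table (as the deleted test checked):
        assert_eq!(eval_mle(&evals, &[0, 1]), evals[1]);
        assert_eq!(eval_mle(&evals, &[1, 1]), evals[3]);
        // At a non-boolean point it interpolates multilinearly:
        let expected = 5 * (1 - 2) * (1 - 3) + (-1) * (1 - 2) * 3 + 0 * 2 * (1 - 3) + 8 * 2 * 3;
        assert_eq!(eval_mle(&evals, &[2, 3]), expected);
    }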
@@ -1 +1 @@
pub mod ml_poly;
pub mod sparse_ml_poly;
shockwave_plus/src/polynomial/sparse_ml_poly.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
use crate::{EqPoly, FieldExt};

#[derive(Clone, Debug)]
pub struct SparseMLPoly<F> {
pub evals: Vec<(usize, F)>,
pub num_vars: usize,
}

impl<F: FieldExt> SparseMLPoly<F> {
pub fn new(evals: Vec<(usize, F)>, num_vars: usize) -> Self {
Self { evals, num_vars }
}

pub fn num_entries(&self) -> usize {
2usize.pow(self.num_vars as u32)
}

pub fn from_dense(dense_evals: Vec<F>) -> Self {
assert!(dense_evals.len().is_power_of_two());
let sparse_evals = dense_evals
.iter()
.enumerate()
.filter(|(_, eval)| **eval != F::ZERO)
.map(|(i, eval)| (i, *eval))
.collect::<Vec<(usize, F)>>();
let num_vars = (dense_evals.len() as f64).log2() as usize;

Self {
evals: sparse_evals,
num_vars,
}
}

// `t` should be in big-endian form.
pub fn eval(&self, t: &[F]) -> F {
assert_eq!(self.num_vars, t.len());
// Evaluate the multilinear extension of the polynomial `a`,
// over the boolean hypercube

let eq_poly = EqPoly::new(t.to_vec());
let eq_evals = eq_poly.evals();

let mut result = F::ZERO;

for eval in &self.evals {
result += eq_evals[eval.0] * eval.1;
}

result
}
}

#[cfg(test)]
mod tests {
use super::*;
type F = halo2curves::secp256k1::Fp;
use halo2curves::ff::Field;

#[test]
fn test_sparse_ml_poly_eval() {
let num_vars = 4;
let num_evals = 2usize.pow(num_vars as u32);
let evals = (0..num_evals)
.map(|x| F::from((x as u64) as u64))
.collect::<Vec<F>>();

let ml_poly = SparseMLPoly::from_dense(evals.clone());
let eval_last = ml_poly.eval(&[F::ONE, F::ONE, F::ONE, F::ONE]);
assert_eq!(
eval_last,
evals[evals.len() - 1],
"The last evaluation is not correct"
);

let eval_first = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ZERO]);
assert_eq!(eval_first, evals[0], "The first evaluation is not correct");

let eval_second = ml_poly.eval(&[F::ZERO, F::ZERO, F::ZERO, F::ONE]);
assert_eq!(
eval_second, evals[1],
"The second evaluation is not correct"
);
}
}
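The new `SparseMLPoly` keeps only the nonzero evaluations as `(index, value)` pairs, so `eval` adds up `eq_evals[i] * v` over those entries instead of walking the whole 2^num_vars table. A compact sketch of that filtering, and of the fact that the sparse sum matches the dense one, with `i64` in place of `F` and toy values only:

    // Sparse MLE evaluation: f(t) = sum over nonzero entries (i, v) of v * eq(t, i).
    fn eq_at(t: &[i64], x: usize) -> i64 {
        let m = t.len();
        (0..m).fold(1, |acc, j| {
            let bit = ((x >> (m - 1 - j)) & 1) as i64;
            acc * (t[j] * bit + (1 - t[j]) * (1 - bit))
        })
    }

    fn main() {
        // Dense evaluations over {0,1}^2 and their sparse form, nonzero entries only.
        let dense = [0i64, 4, 0, -7];
        let sparse: Vec<(usize, i64)> = dense
            .iter()
            .enumerate()
            .filter(|(_, v)| **v != 0)
            .map(|(i, v)| (i, *v))
            .collect();
        assert_eq!(sparse, vec![(1, 4), (3, -7)]);

        let t = [5i64, -3];
        let sparse_eval: i64 = sparse.iter().map(|(i, v)| v * eq_at(&t, *i)).sum();
        let dense_eval: i64 = dense.iter().enumerate().map(|(i, v)| v * eq_at(&t, i)).sum();
        assert_eq!(sparse_eval, dense_eval); // the zero entries contribute nothing
    }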
@@ -1,7 +1,7 @@
use crate::polynomial::sparse_ml_poly::SparseMLPoly;
use crate::FieldExt;
use tensor_pcs::SparseMLPoly;

#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct SparseMatrixEntry<F: FieldExt> {
pub row: usize,
pub col: usize,

@@ -52,7 +52,10 @@ where
let val = entries[i].val;
evals.push(((row * num_cols) + col, val));
}
let ml_poly_num_vars = ((self.num_cols * self.num_rows) as f64).log2() as usize;
let ml_poly_num_vars = ((self.num_cols.next_power_of_two()
* self.num_rows.next_power_of_two()) as f64)
.log2() as usize;

let ml_poly = SparseMLPoly::new(evals, ml_poly_num_vars);
ml_poly
}
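With this change the matrix's multilinear extension is indexed over the power-of-two padded dimensions, so the variable count is log2 of the padded size rather than of the raw `num_cols * num_rows`. A quick check of that formula in isolation (a sketch of the arithmetic only, not the `Matrix` type):

    // The updated variable count: log2 of the zero-padded matrix size.
    fn ml_poly_num_vars(num_cols: usize, num_rows: usize) -> usize {
        ((num_cols.next_power_of_two() * num_rows.next_power_of_two()) as f64).log2() as usize
    }

    fn main() {
        assert_eq!(ml_poly_num_vars(32, 32), 10); // already powers of two: 32 * 32 = 2^10
        assert_eq!(ml_poly_num_vars(20, 20), 10); // both sides pad to 32
        assert_eq!(ml_poly_num_vars(33, 20), 11); // 64 * 32 = 2^11
    }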
@@ -131,7 +134,6 @@ where
pub B: Matrix<F>,
pub C: Matrix<F>,
pub public_input: Vec<F>,
pub num_cons: usize,
pub num_vars: usize,
pub num_input: usize,
}

@@ -149,6 +151,10 @@ where
result
}

pub fn num_cons(&self) -> usize {
self.A.entries.len()
}

pub fn z_len(&self) -> usize {
((self.num_vars.next_power_of_two() + 1) + self.num_input).next_power_of_two()
}
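The new helpers make the constraint count and the Z-vector length explicit: `num_cons` is the number of stored A entries, and `z_len` pads the witness to a power of two, reserves one extra slot (presumably the constant term in Z), appends the public input, and rounds the total up to a power of two again. A worked check of that arithmetic for the values used in `test_shockwave_plus` above (`num_vars = 10`, `num_input = 3`); this mirrors the formula only:

    // Mirror of z_len: ((num_vars.next_power_of_two() + 1) + num_input).next_power_of_two()
    fn z_len(num_vars: usize, num_input: usize) -> usize {
        ((num_vars.next_power_of_two() + 1) + num_input).next_power_of_two()
    }

    fn main() {
        // 10 pads to 16, +1 reserved slot, +3 public inputs = 20, rounded up to 32.
        assert_eq!(z_len(10, 3), 32);
        // A witness length that is already a power of two: 16 + 1 + 3 = 20 -> 32 as well.
        assert_eq!(z_len(16, 3), 32);
    }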
@@ -185,11 +191,47 @@ where
let mut B_entries: Vec<SparseMatrixEntry<F>> = vec![];
let mut C_entries: Vec<SparseMatrixEntry<F>> = vec![];

let num_cons = z.len();
for i in 0..num_cons {
let A_col = i % num_cons;
let B_col = (i + 1) % num_cons;
let C_col = (i + 2) % num_cons;
// Constrain the variables
for i in 0..num_vars {
let A_col = i % num_vars;
let B_col = (i + 1) % num_vars;
let C_col = (i + 2) % num_vars;

// For the i'th constraint,
// add the value 1 at the (i % num_vars)th column of A, B.
// Compute the corresponding C_column value so that A_i * B_i = C_i
// we apply multiplication since the Hadamard product is computed for Az ・ Bz,

// We only _enable_ a single variable in each constraint.
let AB = if z[C_col] == F::ZERO { F::ZERO } else { F::ONE };

A_entries.push(SparseMatrixEntry {
row: i,
col: A_col,
val: AB,
});
B_entries.push(SparseMatrixEntry {
row: i,
col: B_col,
val: AB,
});
C_entries.push(SparseMatrixEntry {
row: i,
col: C_col,
val: if z[C_col] == F::ZERO {
F::ZERO
} else {
(z[A_col] * z[B_col]) * z[C_col].invert().unwrap()
},
});
}

// Constrain the public inputs
let input_index_start = num_vars.next_power_of_two() + 1;
for i in input_index_start..(input_index_start + num_input) {
let A_col = i;
let B_col = (i + 1) % input_index_start + num_input;
let C_col = (i + 2) % input_index_start + num_input;

// For the i'th constraint,
// add the value 1 at the (i % num_vars)th column of A, B.
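In the rewritten loop, each synthetic constraint turns on a single column of A and of B with the same value `AB`, and the C entry at `C_col` is chosen as `(z[A_col] * z[B_col]) * z[C_col]^{-1}` so that row i satisfies (Az)_i * (Bz)_i = (Cz)_i. A toy check of that choice with exact integers, where ordinary division stands in for the field inverse (illustrative values only):

    fn main() {
        // One synthetic constraint over a toy assignment z; integer division stands in
        // for the field inverse z[c_col].invert().
        let z = [2i64, 3, 6, 5];
        let (a_col, b_col, c_col) = (0usize, 1, 2);

        // A and B each enable one column with the same value (AB = 1 here, since z[c_col] != 0).
        let ab = 1i64;
        // C's entry makes the row products match: (2 * 3) / 6 = 1.
        let c_val = (z[a_col] * z[b_col]) / z[c_col];

        let az_i = ab * z[a_col]; // row i of A dotted with z
        let bz_i = ab * z[b_col]; // row i of B dotted with z
        let cz_i = c_val * z[c_col]; // row i of C dotted with z
        assert_eq!(az_i * bz_i, cz_i); // 2 * 3 == 6
    }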
@@ -221,7 +263,7 @@ where
}

let num_cols = z.len();
let num_rows = num_cols;
let num_rows = z.len();

let A = Matrix::new(A_entries, num_cols, num_rows);
let B = Matrix::new(B_entries, num_cols, num_rows);

@@ -233,7 +275,6 @@ where
B,
C,
public_input,
num_cons,
num_vars,
num_input,
},

@@ -258,7 +299,7 @@ mod tests {

use super::*;
type F = halo2curves::secp256k1::Fp;
use crate::polynomial::ml_poly::MlPoly;
use tensor_pcs::MlPoly;

// Returns a vector of vectors of length m, where each vector is a boolean vector (big endian)
fn boolean_hypercube<F: FieldExt>(m: usize) -> Vec<Vec<F>> {

@@ -286,6 +327,7 @@ mod tests {
let num_vars = num_cons - num_input;

let (r1cs, mut witness) = R1CS::<F>::produce_synthetic_r1cs(num_vars, num_input);
assert_eq!(r1cs.num_cons(), num_cons);

assert_eq!(witness.len(), num_vars);
assert_eq!(r1cs.public_input.len(), num_input);

@@ -1,6 +1,6 @@
use crate::sumcheck::unipoly::UniPoly;
use serde::{Deserialize, Serialize};
use tensor_pcs::{EqPoly, SparseMLPoly, TensorMLOpening, TensorMultilinearPCS, Transcript};
use tensor_pcs::{EqPoly, MlPoly, TensorMLOpening, TensorMultilinearPCS, Transcript};

use crate::FieldExt;
@@ -50,13 +50,13 @@ impl<F: FieldExt> SumCheckPhase1<F> {

let mut rng = rand::thread_rng();
// Sample a blinding polynomial g(x_1, ..., x_m)
let random_evals = (0..2usize.pow(num_vars as u32))
let blinder_poly_evals = (0..2usize.pow(num_vars as u32))
.map(|_| F::random(&mut rng))
.collect::<Vec<F>>();
let blinder_poly_sum = random_evals.iter().fold(F::ZERO, |acc, x| acc + x);
let blinder_poly = SparseMLPoly::from_dense(random_evals);
let blinder_poly = MlPoly::new(blinder_poly_evals.clone());
let blinder_poly_sum = blinder_poly_evals.iter().fold(F::ZERO, |acc, x| acc + x);

let blinder_poly_comm = pcs.commit(&blinder_poly);
let blinder_poly_comm = pcs.commit(&blinder_poly_evals);

transcript.append_fe(&blinder_poly_sum);
transcript.append_bytes(&blinder_poly_comm.committed_tree.root);
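After this change the prover keeps the blinder's dense evaluation table (`blinder_poly_evals`) so it can be committed and later opened directly, while `blinder_poly_sum` is simply the sum of those 2^num_vars random evaluations, which is what gets appended to the transcript. A tiny sketch of that bookkeeping, with a fixed-seed generator standing in for `F::random` and `u64` in place of `F` (illustrative only):

    fn main() {
        let num_vars = 3usize;

        // A tiny LCG stands in for F::random(&mut rng); values play the role of field elements.
        let mut state: u64 = 0x2545_f491_4f6c_dd1d;
        let mut next = move || {
            state = state.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
            state >> 33
        };

        // One evaluation per point of {0,1}^num_vars, plus their sum over the hypercube.
        let blinder_poly_evals: Vec<u64> = (0..1usize << num_vars).map(|_| next()).collect();
        let blinder_poly_sum: u64 = blinder_poly_evals.iter().sum();

        assert_eq!(blinder_poly_evals.len(), 1 << num_vars);
        // The sum goes to the transcript; the table itself is what would be committed and opened.
        println!("sum over {} evals = {}", blinder_poly_evals.len(), blinder_poly_sum);
    }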
@@ -70,11 +70,7 @@ impl<F: FieldExt> SumCheckPhase1<F> {
let mut A_table = self.Az_evals.clone();
let mut B_table = self.Bz_evals.clone();
let mut C_table = self.Cz_evals.clone();
let mut blinder_table = blinder_poly
.evals
.iter()
.map(|(_, x)| *x)
.collect::<Vec<F>>();
let mut blinder_table = blinder_poly_evals.clone();
let mut eq_table = self.bound_eq_poly.evals();

let zero = F::ZERO;

@@ -122,8 +118,9 @@ impl<F: FieldExt> SumCheckPhase1<F> {
// Prove the evaluation of the blinder polynomial at rx.
let blinder_poly_eval_proof = pcs.open(
&blinder_poly_comm,
&blinder_poly,
&blinder_poly_evals,
&self.challenge,
blinder_poly.eval(&self.challenge),
transcript,
);

@@ -2,7 +2,7 @@ use crate::r1cs::r1cs::Matrix;
use crate::sumcheck::unipoly::UniPoly;
use crate::FieldExt;
use serde::{Deserialize, Serialize};
use tensor_pcs::{EqPoly, SparseMLPoly, TensorMLOpening, TensorMultilinearPCS, Transcript};
use tensor_pcs::{EqPoly, MlPoly, TensorMLOpening, TensorMultilinearPCS, Transcript};

#[derive(Serialize, Deserialize)]
pub struct SCPhase2Proof<F: FieldExt> {

@@ -71,12 +71,12 @@ impl<F: FieldExt> SumCheckPhase2<F> {

let mut rng = rand::thread_rng();
// Sample a blinding polynomial g(x_1, ..., x_m) of degree 3
let random_evals = (0..2usize.pow(num_vars as u32))
let blinder_poly_evals = (0..2usize.pow(num_vars as u32))
.map(|_| F::random(&mut rng))
.collect::<Vec<F>>();
let blinder_poly_sum = random_evals.iter().fold(F::ZERO, |acc, x| acc + x);
let blinder_poly = SparseMLPoly::from_dense(random_evals);
let blinder_poly_comm = pcs.commit(&blinder_poly);
let blinder_poly_sum = blinder_poly_evals.iter().fold(F::ZERO, |acc, x| acc + x);
let blinder_poly = MlPoly::new(blinder_poly_evals.clone());
let blinder_poly_comm = pcs.commit(&blinder_poly_evals);

transcript.append_fe(&blinder_poly_sum);
transcript.append_bytes(&blinder_poly_comm.committed_tree.root);

@@ -89,11 +89,7 @@ impl<F: FieldExt> SumCheckPhase2<F> {
let mut B_table = B_evals.clone();
let mut C_table = C_evals.clone();
let mut Z_table = self.Z_evals.clone();
let mut blinder_table = blinder_poly
.evals
.iter()
.map(|(_, x)| *x)
.collect::<Vec<F>>();
let mut blinder_table = blinder_poly_evals.clone();

let zero = F::ZERO;
let one = F::ONE;

@@ -130,7 +126,13 @@ impl<F: FieldExt> SumCheckPhase2<F> {

let ry = self.challenge.clone();

let blinder_poly_eval_proof = pcs.open(&blinder_poly_comm, &blinder_poly, &ry, transcript);
let blinder_poly_eval_proof = pcs.open(
&blinder_poly_comm,
&blinder_poly_evals,
&ry,
blinder_poly.eval(&ry),
transcript,
);

SCPhase2Proof {
round_polys,