diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index b3752ac..df6ba15 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -7,7 +7,7 @@ on:
     branches: [ master ]
 
 jobs:
-  build:
+  build_nightly:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
diff --git a/Cargo.toml b/Cargo.toml
index 430d744..61f8d8b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,7 @@
 name = "spartan"
 version = "0.6.0"
 authors = ["Srinath Setty <srinath@microsoft.com>"]
-edition = "2018"
+edition = "2021"
 description = "High-speed zkSNARKs without trusted setup"
 documentation = "https://docs.rs/spartan/"
 readme = "README.md"
diff --git a/src/dense_mlpoly.rs b/src/dense_mlpoly.rs
index 462d94e..d0ebb34 100644
--- a/src/dense_mlpoly.rs
+++ b/src/dense_mlpoly.rs
@@ -121,7 +121,7 @@ impl IdentityPolynomial {
 impl DensePolynomial {
   pub fn new(Z: Vec<Scalar>) -> Self {
     DensePolynomial {
-      num_vars: Z.len().log2() as usize,
+      num_vars: Z.len().log_2() as usize,
       len: Z.len(),
       Z,
     }
diff --git a/src/lib.rs b/src/lib.rs
index b9faf7e..736a3d0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,6 +1,4 @@
 #![allow(non_snake_case)]
-#![feature(test)]
-#![feature(int_log)]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 #![allow(clippy::assertions_on_result_states)]
diff --git a/src/math.rs b/src/math.rs
index 085c105..33e9e14 100644
--- a/src/math.rs
+++ b/src/math.rs
@@ -2,6 +2,7 @@ pub trait Math {
   fn square_root(self) -> usize;
   fn pow2(self) -> usize;
   fn get_bits(self, num_bits: usize) -> Vec<bool>;
+  fn log_2(self) -> usize;
 }
 
 impl Math for usize {
@@ -22,4 +23,14 @@ impl Math for usize {
      .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
      .collect::<Vec<bool>>()
   }
+
+  fn log_2(self) -> usize {
+    assert_ne!(self, 0);
+
+    if self.is_power_of_two() {
+      (1usize.leading_zeros() - self.leading_zeros()) as usize
+    } else {
+      (0usize.leading_zeros() - self.leading_zeros()) as usize
+    }
+  }
 }
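A note on the new `Math::log_2` helper, since every `log2`-to-`log_2` call site below depends on it: it replaces the nightly `int_log` method `usize::log2` whose feature gate `src/lib.rs` drops above. For a power of two it returns the exact logarithm (`1usize.leading_zeros()` is the bit width minus one, 63 on 64-bit targets), and for any other input it rounds up rather than down (`0usize.leading_zeros()` is the full bit width, 64), which only matters off the power-of-two path Spartan actually exercises. A minimal sketch of that behavior, copying the method body from the hunk above into an illustrative test (the test itself is not part of the patch):

```rust
pub trait Math {
  fn log_2(self) -> usize;
}

impl Math for usize {
  // Same body as the patch: exact log2 for powers of two,
  // ceiling log2 otherwise.
  fn log_2(self) -> usize {
    assert_ne!(self, 0);
    if self.is_power_of_two() {
      (1usize.leading_zeros() - self.leading_zeros()) as usize
    } else {
      (0usize.leading_zeros() - self.leading_zeros()) as usize
    }
  }
}

#[test]
fn log_2_examples() {
  assert_eq!(1usize.log_2(), 0);
  assert_eq!(1024usize.log_2(), 10); // exact for powers of two
  assert_eq!(1025usize.log_2(), 11); // rounds up, unlike the floor-based nightly log2
}
```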
diff --git a/src/nizk/bullet.rs b/src/nizk/bullet.rs
index 79419e8..6a4cff5 100644
--- a/src/nizk/bullet.rs
+++ b/src/nizk/bullet.rs
@@ -4,15 +4,19 @@
 #![allow(clippy::type_complexity)]
 #![allow(clippy::too_many_arguments)]
 use super::super::errors::ProofVerifyError;
-use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul, CompressGroupElement, DecompressGroupElement};
+use super::super::group::{
+  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement,
+  VartimeMultiscalarMul,
+};
+use super::super::math::Math;
 use super::super::scalar::Scalar;
 use super::super::transcript::ProofTranscript;
+use ark_ff::{fields, Field};
+use ark_serialize::*;
+use ark_std::{One, Zero};
 use core::iter;
-use std::ops::MulAssign;
 use merlin::Transcript;
-use ark_serialize::*;
-use ark_ff::{Field, fields};
-use ark_std::{One, Zero};
+use std::ops::MulAssign;
 
 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct BulletReductionProof {
@@ -58,7 +62,7 @@ impl BulletReductionProof {
     // All of the input vectors must have a length that is a power of two.
     let mut n = G.len();
     assert!(n.is_power_of_two());
-    let lg_n = n.log2() as usize;
+    let lg_n = n.log_2() as usize;
 
     // All of the input vectors must have the same length.
     assert_eq!(G.len(), n);
@@ -86,16 +90,34 @@ impl BulletReductionProof {
       a_L
        .iter()
        .chain(iter::once(&c_L))
-        .chain(iter::once(blind_L)).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
-      G_R.iter().chain(iter::once(Q)).chain(iter::once(H)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
+        .chain(iter::once(blind_L))
+        .map(|s| *s)
+        .collect::<Vec<Scalar>>()
+        .as_slice(),
+      G_R
+        .iter()
+        .chain(iter::once(Q))
+        .chain(iter::once(H))
+        .map(|p| *p)
+        .collect::<Vec<GroupElement>>()
+        .as_slice(),
     );
 
     let R = GroupElement::vartime_multiscalar_mul(
       a_R
        .iter()
        .chain(iter::once(&c_R))
-        .chain(iter::once(blind_R)).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
-      G_L.iter().chain(iter::once(Q)).chain(iter::once(H)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
+        .chain(iter::once(blind_R))
+        .map(|s| *s)
+        .collect::<Vec<Scalar>>()
+        .as_slice(),
+      G_L
+        .iter()
+        .chain(iter::once(Q))
+        .chain(iter::once(H))
+        .map(|p| *p)
+        .collect::<Vec<GroupElement>>()
+        .as_slice(),
     );
 
     transcript.append_point(b"L", &L.compress());
@@ -161,7 +183,7 @@ impl BulletReductionProof {
 
     // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
     let mut challenges_inv: Vec<Scalar> = challenges.clone();
-
+    ark_ff::fields::batch_inversion(&mut challenges_inv);
     let mut allinv: Scalar = Scalar::one();
     for c in challenges.iter().filter(|s| !s.is_zero()) {
@@ -225,8 +247,16 @@ impl BulletReductionProof {
       u_sq
        .iter()
        .chain(u_inv_sq.iter())
-        .chain(iter::once(&Scalar::one())).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
-      Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
+        .chain(iter::once(&Scalar::one()))
+        .map(|s| *s)
+        .collect::<Vec<Scalar>>()
+        .as_slice(),
+      Ls.iter()
+        .chain(Rs.iter())
+        .chain(iter::once(Gamma))
+        .map(|p| *p)
+        .collect::<Vec<GroupElement>>()
+        .as_slice(),
     );
 
     Ok((G_hat, Gamma_hat, a_hat))
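One behavioral note on this file: the hunk at line 161 adds `ark_ff::fields::batch_inversion`, which inverts the whole challenge vector in place using Montgomery's trick, one field inversion plus O(n) multiplications. A self-contained sketch of the same call, using the `ark-bls12-377` scalar field that the tests in `src/nizk/mod.rs` below import (the harness itself is illustrative, not part of the patch):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_std::One;

fn main() {
  // Invert [1, 2, 3, 4] in one batch rather than element by element.
  let xs: Vec<Scalar> = (1u64..=4).map(Scalar::from).collect();
  let mut xs_inv = xs.clone();
  ark_ff::fields::batch_inversion(&mut xs_inv);
  for (x, x_inv) in xs.iter().zip(&xs_inv) {
    assert_eq!(*x * x_inv, Scalar::one());
  }
}
```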
diff --git a/src/nizk/mod.rs b/src/nizk/mod.rs
index ea147de..48cc0d6 100644
--- a/src/nizk/mod.rs
+++ b/src/nizk/mod.rs
@@ -2,15 +2,18 @@ use super::commitments::{Commitments, MultiCommitGens};
 use super::errors::ProofVerifyError;
 use super::group::{
-  CompressedGroup, CompressGroupElement, UnpackGroupElement, GroupElement, DecompressGroupElement, GroupElementAffine};
+  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement, GroupElementAffine,
+  UnpackGroupElement,
+};
+use super::math::Math;
 use super::random::RandomTape;
 use super::scalar::Scalar;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use ark_ec::group::Group;
-use merlin::Transcript;
-use ark_serialize::*;
 use ark_ec::ProjectiveCurve;
 use ark_ff::PrimeField;
+use ark_serialize::*;
+use merlin::Transcript;
 
 mod bullet;
 use bullet::BulletReductionProof;
@@ -49,7 +52,7 @@ impl KnowledgeProof {
     let c = transcript.challenge_scalar(b"c");
 
     let z1 = c * x + t1;
-    let z2 = c * r + t2;
+    let z2 = c * r + t2;
 
     (KnowledgeProof { alpha, z1, z2 }, C)
   }
@@ -58,8 +61,7 @@ impl KnowledgeProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    C: &
-    CompressedGroup,
+    C: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(KnowledgeProof::protocol_name());
     C.append_to_transcript(b"C", transcript);
@@ -68,7 +70,7 @@ impl KnowledgeProof {
     let c = transcript.challenge_scalar(b"c");
 
     let lhs = self.z1.commit(&self.z2, gens_n).compress();
-    let rhs = ( C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress();
+    let rhs = (C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress();
 
     if lhs == rhs {
       Ok(())
@@ -80,8 +82,7 @@ impl KnowledgeProof {
 
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct EqualityProof {
-  alpha:
-  CompressedGroup,
+  alpha: CompressedGroup,
   z: Scalar,
 }
@@ -98,9 +99,7 @@ impl EqualityProof {
     s1: &Scalar,
     v2: &Scalar,
     s2: &Scalar,
-  ) -> (EqualityProof,
-  CompressedGroup,
-  CompressedGroup) {
+  ) -> (EqualityProof, CompressedGroup, CompressedGroup) {
     transcript.append_protocol_name(EqualityProof::protocol_name());
 
     // produce a random Scalar
@@ -126,10 +125,8 @@ impl EqualityProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    C1: &
-    CompressedGroup,
-    C2: &
-    CompressedGroup,
+    C1: &CompressedGroup,
+    C2: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(EqualityProof::protocol_name());
     C1.append_to_transcript(b"C1", transcript);
@@ -155,12 +152,9 @@ impl EqualityProof {
 
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct ProductProof {
-  alpha:
-  CompressedGroup,
-  beta:
-  CompressedGroup,
-  delta:
-  CompressedGroup,
+  alpha: CompressedGroup,
+  beta: CompressedGroup,
+  delta: CompressedGroup,
   z: Vec<Scalar>,
 }
@@ -181,11 +175,8 @@ impl ProductProof {
     rZ: &Scalar,
   ) -> (
     ProductProof,
-  CompressedGroup,
-  CompressedGroup,
-  CompressedGroup,
+    CompressedGroup,
+    CompressedGroup,
+    CompressedGroup,
   ) {
     transcript.append_protocol_name(ProductProof::protocol_name());
@@ -197,16 +188,14 @@ impl ProductProof {
     let b4 = random_tape.random_scalar(b"b4");
     let b5 = random_tape.random_scalar(b"b5");
 
-    let X_unc = x.commit(rX, gens_n);
-
-
+    let X_unc = x.commit(rX, gens_n);
+
     let X = X_unc.compress();
     X.append_to_transcript(b"X", transcript);
 
     let X_new = GroupElement::decompress(&X);
 
     assert_eq!(X_unc, X_new.unwrap());
-
     let Y = y.commit(rY, gens_n).compress();
     Y.append_to_transcript(b"Y", transcript);
@@ -253,17 +242,17 @@ impl ProductProof {
   }
 
   fn check_equality(
-    P: &
-    CompressedGroup,
-    X: &
-    CompressedGroup,
+    P: &CompressedGroup,
+    X: &CompressedGroup,
     c: &Scalar,
     gens_n: &MultiCommitGens,
     z1: &Scalar,
     z2: &Scalar,
   ) -> bool {
     println!("{:?}", X);
-    let lhs = (GroupElement::decompress(P).unwrap() + GroupElement::decompress(X).unwrap().mul(c.into_repr())).compress();
+    let lhs = (GroupElement::decompress(P).unwrap()
+      + GroupElement::decompress(X).unwrap().mul(c.into_repr()))
+    .compress();
     let rhs = z1.commit(z2, gens_n).compress();
 
     lhs == rhs
@@ -273,12 +262,9 @@ impl ProductProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    X: &
-    CompressedGroup,
-    Y: &
-    CompressedGroup,
-    Z: &
-    CompressedGroup,
+    X: &CompressedGroup,
+    Y: &CompressedGroup,
+    Z: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(ProductProof::protocol_name());
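For orientation on the sigma-protocol verifiers being reformatted here (none of these hunks change logic): taking `KnowledgeProof` as the representative case, the prover commits C = x·g + r·h, sends the announcement α = t1·g + t2·h, and answers the challenge c with z1 = c·x + t1 and z2 = c·r + t2. The verifier accepts iff

    z1·g + z2·h == c·C + α

which is exactly the code's comparison of `lhs = self.z1.commit(&self.z2, gens_n)` against `rhs = C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?`. `EqualityProof` and `ProductProof` check the same shape of equation against their own announcements.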
@@ -321,10 +307,8 @@ impl ProductProof {
 
 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DotProductProof {
-  delta:
-  CompressedGroup,
-  beta:
-  CompressedGroup,
+  delta: CompressedGroup,
+  beta: CompressedGroup,
   z: Vec<Scalar>,
   z_delta: Scalar,
   z_beta: Scalar,
@@ -350,9 +334,7 @@ impl DotProductProof {
     a_vec: &[Scalar],
     y: &Scalar,
     blind_y: &Scalar,
-  ) -> (DotProductProof,
-  CompressedGroup,
-  CompressedGroup) {
+  ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
     transcript.append_protocol_name(DotProductProof::protocol_name());
 
     let n = x_vec.len();
@@ -409,10 +391,8 @@ impl DotProductProof {
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
     a: &[Scalar],
-    Cx: &
-    CompressedGroup,
-    Cy: &
-    CompressedGroup,
+    Cx: &CompressedGroup,
+    Cy: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     assert_eq!(gens_n.n, a.len());
     assert_eq!(gens_1.n, 1);
@@ -426,11 +406,12 @@ impl DotProductProof {
 
     let c = transcript.challenge_scalar(b"c");
 
-    let mut result =
-      Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
+    let mut result = Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()?
+      == self.z.commit(&self.z_delta, gens_n);
 
     let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
-    result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
+    result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()?
+      == dotproduct_z_a.commit(&self.z_beta, gens_1);
 
     if result {
       Ok(())
     } else {
@@ -455,10 +436,8 @@ impl DotProductProofGens {
 
 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DotProductProofLog {
   bullet_reduction_proof: BulletReductionProof,
-  delta:
-  CompressedGroup,
-  beta:
-  CompressedGroup,
+  delta: CompressedGroup,
+  beta: CompressedGroup,
   z1: Scalar,
   z2: Scalar,
 }
@@ -482,9 +461,7 @@ impl DotProductProofLog {
     a_vec: &[Scalar],
     y: &Scalar,
     blind_y: &Scalar,
-  ) -> (DotProductProofLog,
-  CompressedGroup,
-  CompressedGroup) {
+  ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
     transcript.append_protocol_name(DotProductProofLog::protocol_name());
 
     let n = x_vec.len();
@@ -496,8 +473,8 @@ impl DotProductProofLog {
     let r_delta = random_tape.random_scalar(b"r_delta");
     let r_beta = random_tape.random_scalar(b"r_delta");
     let blinds_vec = {
-      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log2() as usize);
-      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log2() as usize);
+      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
+      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
       (0..v1.len())
        .map(|i| (v1[i], v2[i]))
        .collect::<Vec<(Scalar, Scalar)>>()
@@ -562,10 +539,8 @@ impl DotProductProofLog {
     gens: &DotProductProofGens,
     transcript: &mut Transcript,
     a: &[Scalar],
-    Cx: &
-    CompressedGroup,
-    Cy: &
-    CompressedGroup,
+    Cx: &CompressedGroup,
+    Cy: &CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     assert_eq!(gens.n, n);
     assert_eq!(a.len(), n);
@@ -593,8 +568,11 @@ impl DotProductProofLog {
     let z1_s = &self.z1;
     let z2_s = &self.z2;
 
-    let lhs = ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s).compress();
-    let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr()) + gens.gens_1.h.mul(z2_s.into_repr())).compress();
+    let lhs =
+      ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s).compress();
+    let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr())
+      + gens.gens_1.h.mul(z2_s.into_repr()))
+    .compress();
 
     assert_eq!(lhs, rhs);
@@ -610,15 +588,15 @@ impl DotProductProofLog {
 mod tests {
   use std::marker::PhantomData;
 
-use crate::group::VartimeMultiscalarMul;
+  use crate::group::VartimeMultiscalarMul;
 
-use super::*;
-use ark_bls12_377::{G1Affine, Fq, FqParameters};
-use ark_ff::{Fp384, BigInteger384};
-use ark_std::{UniformRand};
+  use super::*;
+  use ark_bls12_377::{Fq, FqParameters, G1Affine};
+  use ark_ff::{BigInteger384, Fp384};
+  use ark_std::UniformRand;
   #[test]
   fn check_knowledgeproof() {
-    let mut rng = ark_std::rand::thread_rng();
+    let mut rng = ark_std::rand::thread_rng();
 
     let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
@@ -638,7 +616,7 @@ use ark_std::{UniformRand};
 
   #[test]
   fn check_equalityproof() {
-    let mut rng = ark_std::rand::thread_rng();
+    let mut rng = ark_std::rand::thread_rng();
 
     let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
     let v1 = Scalar::rand(&mut rng);
@@ -663,14 +641,14 @@ use ark_std::{UniformRand};
       .verify(&gens_1, &mut verifier_transcript, &C1, &C2)
       .is_ok());
   }
-
+
   #[test]
   fn check_productproof() {
-    let mut rng = ark_std::rand::thread_rng();
-    let pt = GroupElement::rand(&mut rng);
-    let pt_c = pt.compress();
-    let pt2 = GroupElement::decompress(&pt_c).unwrap();
-    assert_eq!(pt, pt2);
+    let mut rng = ark_std::rand::thread_rng();
+    let pt = GroupElement::rand(&mut rng);
+    let pt_c = pt.compress();
+    let pt2 = GroupElement::decompress(&pt_c).unwrap();
+    assert_eq!(pt, pt2);
     let gens_1 = MultiCommitGens::new(1, b"test-productproof");
     let x = Scalar::rand(&mut rng);
@@ -702,7 +680,7 @@ use ark_std::{UniformRand};
 
   #[test]
   fn check_dotproductproof() {
-    let mut rng = ark_std::rand::thread_rng();
+    let mut rng = ark_std::rand::thread_rng();
 
     let n = 1024;
@@ -741,7 +719,7 @@ use ark_std::{UniformRand};
 
   #[test]
   fn check_dotproductproof_log() {
-    let mut rng = ark_std::rand::thread_rng();
+    let mut rng = ark_std::rand::thread_rng();
 
     let n = 1024;
diff --git a/src/product_tree.rs b/src/product_tree.rs
index e294f9e..1c44941 100644
--- a/src/product_tree.rs
+++ b/src/product_tree.rs
@@ -1,6 +1,7 @@
 #![allow(dead_code)]
 use super::dense_mlpoly::DensePolynomial;
 use super::dense_mlpoly::EqPolynomial;
+use super::math::Math;
 use super::scalar::Scalar;
 use super::sumcheck::SumcheckInstanceProof;
 use super::transcript::ProofTranscript;
@@ -37,7 +38,7 @@ impl ProductCircuit {
     let mut left_vec: Vec<DensePolynomial> = Vec::new();
     let mut right_vec: Vec<DensePolynomial> = Vec::new();
 
-    let num_layers = poly.len().log2() as usize;
+    let num_layers = poly.len().log_2() as usize;
     let (outp_left, outp_right) = poly.split(poly.len() / 2);
 
     left_vec.push(outp_left);
@@ -182,7 +183,7 @@ impl ProductCircuitEvalProof {
     let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
     assert_eq!(poly_C.len(), len / 2);
 
-    let num_rounds_prod = poly_C.len().log2() as usize;
+    let num_rounds_prod = poly_C.len().log_2() as usize;
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|
@@ -223,7 +224,7 @@ impl ProductCircuitEvalProof {
     len: usize,
     transcript: &mut Transcript,
   ) -> (Scalar, Vec<Scalar>) {
-    let num_layers = len.log2() as usize;
+    let num_layers = len.log_2() as usize;
     let mut claim = eval;
     let mut rand: Vec<Scalar> = Vec::new();
     //let mut num_rounds = 0;
@@ -279,7 +280,7 @@ impl ProductCircuitEvalProofBatched {
     let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
     assert_eq!(poly_C_par.len(), len / 2);
 
-    let num_rounds_prod = poly_C_par.len().log2() as usize;
+    let num_rounds_prod = poly_C_par.len().log_2() as usize;
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|
@@ -389,7 +390,7 @@ impl ProductCircuitEvalProofBatched {
     len: usize,
     transcript: &mut Transcript,
   ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
-    let num_layers = len.log2() as usize;
+    let num_layers = len.log_2() as usize;
     let mut rand: Vec<Scalar> = Vec::new();
     //let mut num_rounds = 0;
     assert_eq!(self.proof.len(), num_layers);
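Context for the `num_layers` changes above: `ProductCircuit` splits each layer in half (`poly.split(poly.len() / 2)`) and combines the halves pointwise, so a power-of-two input of length n yields `n.log_2()` layers. A toy sketch of that layering over plain integers (a hypothetical helper for illustration, not the library's `DensePolynomial`-based circuit):

```rust
// Toy product tree: each step multiplies the left half against the
// right half pointwise, halving the layer, until one value remains.
fn product_layers(mut layer: Vec<u64>) -> Vec<Vec<u64>> {
  assert!(layer.len().is_power_of_two());
  let mut layers = vec![layer.clone()];
  while layer.len() > 1 {
    let half = layer.len() / 2;
    let next: Vec<u64> = (0..half).map(|j| layer[j] * layer[half + j]).collect();
    layers.push(next.clone());
    layer = next;
  }
  layers
}

fn main() {
  let layers = product_layers(vec![1, 2, 3, 4]);
  assert_eq!(layers.len() - 1, 2); // log_2(4) multiplication layers
  assert_eq!(layers.last().unwrap()[0], 24); // the full product
}
```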
diff --git a/src/r1csinstance.rs b/src/r1csinstance.rs
index 53a5d3f..c144d1c 100644
--- a/src/r1csinstance.rs
+++ b/src/r1csinstance.rs
@@ -46,8 +46,8 @@ impl R1CSCommitmentGens {
     num_nz_entries: usize,
   ) -> R1CSCommitmentGens {
     assert!(num_inputs < num_vars);
-    let num_poly_vars_x = num_cons.log2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log2() as usize;
+    let num_poly_vars_x = num_cons.log_2() as usize;
+    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
     let gens =
       SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
     R1CSCommitmentGens { gens }
@@ -115,8 +115,8 @@ impl R1CSInstance {
     assert!(num_inputs < num_vars);
 
     // no errors, so create polynomials
-    let num_poly_vars_x = num_cons.log2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log2() as usize;
+    let num_poly_vars_x = num_cons.log_2() as usize;
+    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
 
     let mat_A = (0..A.len())
      .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
@@ -166,8 +166,8 @@ impl R1CSInstance {
     let mut rng = ark_std::rand::thread_rng();
 
     // assert num_cons and num_vars are power of 2
-    assert_eq!((num_cons.log2() as usize).pow2(), num_cons);
-    assert_eq!((num_vars.log2() as usize).pow2(), num_vars);
+    assert_eq!((num_cons.log_2() as usize).pow2(), num_cons);
+    assert_eq!((num_vars.log_2() as usize).pow2(), num_vars);
 
     // num_inputs + 1 <= num_vars
     assert!(num_inputs < num_vars);
@@ -214,8 +214,8 @@ impl R1CSInstance {
     Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
     Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
 
-    let num_poly_vars_x = num_cons.log2() as usize;
-    let num_poly_vars_y = (2 * num_vars).log2() as usize;
+    let num_poly_vars_x = num_cons.log_2() as usize;
+    let num_poly_vars_y = (2 * num_vars).log_2() as usize;
     let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
     let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
     let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
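To make the exponent bookkeeping concrete (the sizes are hypothetical, but power-of-two as the synthetic instance above asserts): with num_cons = num_vars = 1024, the constraint matrices' multilinear extensions get num_poly_vars_x = log_2(1024) = 10 row variables and num_poly_vars_y = log_2(2 * 1024) = 11 column variables; the same quantities reappear as sum-check round counts in `src/r1csproof.rs` below. A standalone check:

```rust
// Exact log2 for power-of-two sizes (matches `Math::log_2` on these inputs).
fn log_2(n: usize) -> usize {
  assert!(n.is_power_of_two());
  n.trailing_zeros() as usize
}

fn main() {
  let (num_cons, num_vars) = (1024usize, 1024usize);
  assert_eq!(log_2(num_cons), 10); // num_poly_vars_x / num_rounds_x
  assert_eq!(log_2(2 * num_vars), 11); // num_poly_vars_y / num_rounds_y
}
```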
diff --git a/src/r1csproof.rs b/src/r1csproof.rs
index fa264c2..3fc3457 100644
--- a/src/r1csproof.rs
+++ b/src/r1csproof.rs
@@ -10,6 +10,7 @@ use super::errors::ProofVerifyError;
 use super::group::{
   CompressGroupElement, DecompressGroupElement, GroupElement, VartimeMultiscalarMul,
 };
+use super::math::Math;
 use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
 use super::r1csinstance::R1CSInstance;
 use super::random::RandomTape;
@@ -65,7 +66,7 @@ pub struct R1CSGens {
 
 impl R1CSGens {
   pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
-    let num_poly_vars = num_vars.log2() as usize;
+    let num_poly_vars = num_vars.log_2() as usize;
     let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
     let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
     R1CSGens { gens_sc, gens_pc }
@@ -154,8 +155,10 @@ impl R1CSProof {
     };
 
     // derive the verifier's challenge tau
-    let (num_rounds_x, num_rounds_y) =
-      (inst.get_num_cons().log2() as usize, z.len().log2() as usize);
+    let (num_rounds_x, num_rounds_y) = (
+      inst.get_num_cons().log_2() as usize,
+      z.len().log_2() as usize,
+    );
     let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
     // compute the initial evaluation table for R(\tau, x)
     let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
@@ -247,7 +250,7 @@ impl R1CSProof {
 
     let n = num_vars;
 
-    let (num_rounds_x, num_rounds_y) = (num_cons.log2() as usize, (2 * num_vars).log2() as usize);
+    let (num_rounds_x, num_rounds_y) = (num_cons.log_2() as usize, (2 * num_vars).log_2() as usize);
 
     // derive the verifier's challenge tau
     let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
@@ -278,30 +281,10 @@ impl R1CSProof {
     let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
 
     // verify the joint claim with a sum-check protocol
-<<<<<<< HEAD
     let (claim_post_phase2, ry) = self
       .sc_proof_phase2
       .verify(claim_phase2, num_rounds_y, 2, transcript)?;
-=======
-    let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify(
-      &comm_claim_phase2,
-      num_rounds_y,
-      2,
-      &gens.gens_sc.gens_1,
-      &gens.gens_sc.gens_3,
-      transcript,
-    )?;
-
-    // verify Z(ry) proof against the initial commitment
-    self.proof_eval_vars_at_ry.verify(
-      &gens.gens_pc,
-      transcript,
-      &ry[1..],
-      &self.comm_vars_at_ry,
-      &self.comm_vars,
-    )?;
->>>>>>> clippy fixes (#50)
 
     let poly_input_eval = {
       // constant term
@@ -312,7 +295,7 @@ impl R1CSProof {
          .map(|i| SparsePolyEntry::new(i + 1, input[i]))
          .collect::<Vec<SparsePolyEntry>>(),
       );
-      SparsePolynomial::new(n.log2() as usize, input_as_sparse_poly_entries).evaluate(&ry[1..])
+      SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries).evaluate(&ry[1..])
     };
 
     let eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval;
b"challenge_combine_n_to_one", - evals_ops.len().log2() as usize, + evals_ops.len().log_2() as usize, ); let mut poly_evals_ops = DensePolynomial::new(evals_ops); @@ -981,7 +981,7 @@ impl HashLayerProof { evals_mem.append_to_transcript(b"claim_evals_mem", transcript); let challenges_mem = transcript.challenge_vector( b"challenge_combine_two_to_one", - evals_mem.len().log2() as usize, + evals_mem.len().log_2() as usize, ); let mut poly_evals_mem = DensePolynomial::new(evals_mem); @@ -1632,8 +1632,8 @@ use rand::RngCore; let num_nz_entries: usize = 256; let num_rows: usize = 256; let num_cols: usize = 256; - let num_vars_x: usize = num_rows.log2() as usize; - let num_vars_y: usize = num_cols.log2() as usize; + let num_vars_x: usize = num_rows.log_2() as usize; + let num_vars_y: usize = num_cols.log_2() as usize; let mut M: Vec = Vec::new();