testudo (mirror of https://github.com/arnaucube/testudo.git)

Commit: arkworks migration to bls12377
@@ -32,6 +32,8 @@ ark-std = { version = "^0.3.0"}
 ark-bls12-377 = { version = "^0.3.0", features = ["r1cs","curve"] }
 ark-serialize = { version = "^0.3.0", features = ["derive"] }
 lazy_static = "1.4.0"
+rand = { version = "0.8", features = [ "std", "std_rng" ] }
+num-bigint = { version = "0.4" }
 
 [dev-dependencies]
 criterion = "0.3.1"
@@ -58,4 +60,4 @@ harness = false
 
 [features]
 multicore = ["rayon"]
 profile = []
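The manifest hunks above pin the arkworks 0.3 ecosystem and add `rand` and `num-bigint`. A minimal sketch (not part of the commit) of the little-endian scalar encoding the rest of the diff relies on, assuming the arkworks 0.3 APIs pinned here:

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::{BigInteger, Field, One, PrimeField};

fn main() {
    let one = Scalar::one();
    // into_repr() yields the canonical BigInteger; to_bytes_le() flattens it.
    let bytes: Vec<u8> = one.into_repr().to_bytes_le();
    // from_random_bytes() parses little-endian bytes and returns None for
    // encodings at or above the field modulus.
    let back = Scalar::from_random_bytes(&bytes).unwrap();
    assert_eq!(one, back);
}
```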
README.md | 54 lines changed
@@ -109,13 +109,15 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 
 ```rust
 #![allow(non_snake_case)]
-# extern crate curve25519_dalek;
+# extern crate ark_std;
 # extern crate libspartan;
 # extern crate merlin;
-# use curve25519_dalek::scalar::Scalar;
+# mod scalar;
+# use scalar::Scalar;
 # use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
 # use merlin::Transcript;
-# use rand::rngs::OsRng;
+# use ark_ff::{PrimeField, Field, BigInteger};
+# use ark_std::{One, Zero, UniformRand};
 # fn main() {
   // produce a tiny instance
   let (
@@ -177,16 +179,16 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 
   // We will encode the above constraints into three matrices, where
   // the coefficients in the matrix are in the little-endian byte order
-  let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
-  let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
-  let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+  let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+  let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
 
   // The constraint system is defined over a finite field, which in our case is
   // the scalar field of ristretto255/curve25519 i.e., p = 2^{252}+27742317777372353535851937790883648493
   // To construct these matrices, we will use `curve25519-dalek` but one can use any other method.
 
   // a variable that holds a byte representation of 1
-  let one = Scalar::one().to_bytes();
+  let one = Scalar::one().into_repr().to_bytes_le();
 
   // R1CS is a set of three sparse matrices A B C, where there is a row for every
   // constraint and a column for every entry in z = (vars, 1, inputs)
@@ -198,20 +200,20 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
   // We set 1 in matrix A for columns that correspond to Z0 and Z1
   // We set 1 in matrix B for column that corresponds to I0
   // We set 1 in matrix C for column that corresponds to Z2
-  A.push((0, 0, one));
-  A.push((0, 1, one));
-  B.push((0, num_vars + 1, one));
-  C.push((0, 2, one));
+  A.push((0, 0, one.clone()));
+  A.push((0, 1, one.clone()));
+  B.push((0, num_vars + 1, one.clone()));
+  C.push((0, 2, one.clone()));
 
   // constraint 1 entries in (A,B,C)
-  A.push((1, 0, one));
-  A.push((1, num_vars + 2, one));
-  B.push((1, 2, one));
-  C.push((1, 3, one));
+  A.push((1, 0, one.clone()));
+  A.push((1, num_vars + 2, one.clone()));
+  B.push((1, 2, one.clone()));
+  C.push((1, 3, one.clone()));
 
   // constraint 3 entries in (A,B,C)
-  A.push((2, 4, one));
-  B.push((2, num_vars, one));
+  A.push((2, 4, one.clone()));
+  B.push((2, num_vars, one.clone()));
 
   let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
 
@@ -226,18 +228,18 @@ let mut rng = ark_std::rand::thread_rng();
   let z4 = Scalar::zero(); //constraint 2
 
   // create a VarsAssignment
-  let mut vars = vec![Scalar::zero().to_bytes(); num_vars];
-  vars[0] = z0.to_bytes();
-  vars[1] = z1.to_bytes();
-  vars[2] = z2.to_bytes();
-  vars[3] = z3.to_bytes();
-  vars[4] = z4.to_bytes();
+  let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
+  vars[0] = z0.into_repr().to_bytes_le();
+  vars[1] = z1.into_repr().to_bytes_le();
+  vars[2] = z2.into_repr().to_bytes_le();
+  vars[3] = z3.into_repr().to_bytes_le();
+  vars[4] = z4.into_repr().to_bytes_le();
   let assignment_vars = VarsAssignment::new(&vars).unwrap();
 
   // create an InputsAssignment
-  let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
-  inputs[0] = i0.to_bytes();
-  inputs[1] = i1.to_bytes();
+  let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
+  inputs[0] = i0.into_repr().to_bytes_le();
+  inputs[1] = i1.into_repr().to_bytes_le();
   let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
 
   // check if the instance we created is satisfiable
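The repeated `.clone()` calls in the README hunks follow directly from the type change: `[u8; 32]` is `Copy`, `Vec<u8>` is not. A minimal illustration of the ownership issue (sketch only, not from the commit):

```rust
fn main() {
    // With [u8; 32] the entry was copied implicitly on every push.
    // With Vec<u8>, the first push would move `one`, so later pushes clone it.
    let one: Vec<u8> = vec![1u8];
    let mut a: Vec<(usize, usize, Vec<u8>)> = Vec::new();
    a.push((0, 0, one.clone()));
    a.push((0, 1, one.clone()));
    assert_eq!(a.len(), 2);
}
```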
@@ -9,9 +9,10 @@
 //!
 //! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
 use ark_bls12_377::Fr as Scalar;
+use ark_ff::{PrimeField, BigInteger};
 use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
 use merlin::Transcript;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand, One, Zero};
 
 #[allow(non_snake_case)]
 fn produce_r1cs() -> (
@@ -31,11 +32,11 @@ fn produce_r1cs() -> (
 
   // We will encode the above constraints into three matrices, where
   // the coefficients in the matrix are in the little-endian byte order
-  let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
-  let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
-  let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+  let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+  let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
 
-  let one = Scalar::one().to_bytes();
+  let one = Scalar::one().into_repr().to_bytes_le();
 
   // R1CS is a set of three sparse matrices A B C, where there is a row for every
   // constraint and a column for every entry in z = (vars, 1, inputs)
@@ -44,29 +45,29 @@ fn produce_r1cs() -> (
 
   // constraint 0 entries in (A,B,C)
   // constraint 0 is Z0 * Z0 - Z1 = 0.
-  A.push((0, 0, one));
-  B.push((0, 0, one));
-  C.push((0, 1, one));
+  A.push((0, 0, one.clone()));
+  B.push((0, 0, one.clone()));
+  C.push((0, 1, one.clone()));
 
   // constraint 1 entries in (A,B,C)
   // constraint 1 is Z1 * Z0 - Z2 = 0.
-  A.push((1, 1, one));
-  B.push((1, 0, one));
-  C.push((1, 2, one));
+  A.push((1, 1, one.clone()));
+  B.push((1, 0, one.clone()));
+  C.push((1, 2, one.clone()));
 
   // constraint 2 entries in (A,B,C)
   // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
-  A.push((2, 2, one));
-  A.push((2, 0, one));
-  B.push((2, num_vars, one));
-  C.push((2, 3, one));
+  A.push((2, 2, one.clone()));
+  A.push((2, 0, one.clone()));
+  B.push((2, num_vars, one.clone()));
+  C.push((2, 3, one.clone()));
 
   // constraint 3 entries in (A,B,C)
   // constraint 3 is (Z3 + 5) * 1 - I0 = 0.
-  A.push((3, 3, one));
-  A.push((3, num_vars, Scalar::from(5u32).to_bytes()));
-  B.push((3, num_vars, one));
-  C.push((3, num_vars + 1, one));
+  A.push((3, 3, one.clone()));
+  A.push((3, num_vars, Scalar::from(5u32).into_repr().to_bytes_le()));
+  B.push((3, num_vars, one.clone()));
+  C.push((3, num_vars + 1, one.clone()));
 
   let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
 
@@ -79,16 +80,16 @@ let mut rng = ark_std::rand::thread_rng();
   let i0 = z3 + Scalar::from(5u32); // constraint 3
 
   // create a VarsAssignment
-  let mut vars = vec![Scalar::zero().to_bytes(); num_vars];
-  vars[0] = z0.to_bytes();
-  vars[1] = z1.to_bytes();
-  vars[2] = z2.to_bytes();
-  vars[3] = z3.to_bytes();
+  let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
+  vars[0] = z0.into_repr().to_bytes_le();
+  vars[1] = z1.into_repr().to_bytes_le();
+  vars[2] = z2.into_repr().to_bytes_le();
+  vars[3] = z3.into_repr().to_bytes_le();
   let assignment_vars = VarsAssignment::new(&vars).unwrap();
 
   // create an InputsAssignment
-  let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
-  inputs[0] = i0.to_bytes();
+  let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
+  inputs[0] = i0.into_repr().to_bytes_le();
   let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
 
   // check if the instance we created is satisfiable
@@ -4,9 +4,9 @@ extern crate libspartan;
 extern crate merlin;
 extern crate rand;
 
-use flate2::{write::ZlibEncoder, Compression};
 use libspartan::{Instance, NIZKGens, NIZK};
 use merlin::Transcript;
+use ark_serialize::*;
 
 fn print(msg: &str) {
   let star = "* ";
@@ -33,9 +33,8 @@ pub fn main() {
   let mut prover_transcript = Transcript::new(b"nizk_example");
   let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
 
-  let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
-  bincode::serialize_into(&mut encoder, &proof).unwrap();
-  let proof_encoded = encoder.finish().unwrap();
+  let mut proof_encoded = Vec::new();
+  proof.serialize(&mut proof_encoded).unwrap();
   let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len());
   print(&msg_proof_len);
 
@@ -3,9 +3,9 @@ extern crate flate2;
 extern crate libspartan;
 extern crate merlin;
 
-use flate2::{write::ZlibEncoder, Compression};
 use libspartan::{Instance, SNARKGens, SNARK};
 use merlin::Transcript;
+use ark_serialize::*;
 
 fn print(msg: &str) {
   let star = "* ";
@@ -43,9 +43,8 @@ pub fn main() {
     &mut prover_transcript,
   );
 
-  let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
-  bincode::serialize_into(&mut encoder, &proof).unwrap();
-  let proof_encoded = encoder.finish().unwrap();
+  let mut proof_encoded = Vec::new();
+  proof.serialize(&mut proof_encoded).unwrap();
   let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len());
   print(&msg_proof_len);
 
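Both example binaries drop the bincode-plus-Zlib pipeline in favor of arkworks' `CanonicalSerialize`, which already emits a compressed encoding into any writer. A minimal sketch of the new path (illustrative, assuming only the crates pinned in the manifest):

```rust
use ark_bls12_377::G1Projective;
use ark_ec::ProjectiveCurve;
use ark_serialize::CanonicalSerialize;

fn main() {
    let point = G1Projective::prime_subgroup_generator();
    let mut encoded = Vec::new();
    // serialize() writes the compressed form, so no external compressor is needed.
    point.serialize(&mut encoded).unwrap();
    println!("compressed point: {} bytes", encoded.len());
}
```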
@@ -1,10 +1,13 @@
-use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT};
+use crate::group::{CompressGroupElement, DecompressGroupElement};
+
+use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT, GroupElementAffine};
 use super::scalar::Scalar;
+use ark_ff::PrimeField;
 use digest::{ExtendableOutput, Input};
 use sha3::Shake256;
 use std::io::Read;
-use ark_ff::fields::{Field};
-use ark_ec::{ProjectiveCurve};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_ec::{ProjectiveCurve, AffineCurve};
 
 #[derive(Debug)]
 pub struct MultiCommitGens {
@@ -17,14 +20,21 @@ impl MultiCommitGens {
   pub fn new(n: usize, label: &[u8]) -> Self {
     let mut shake = Shake256::default();
     shake.input(label);
-    shake.input(GROUP_BASEPOINT.as_bytes());
+    let mut generator_encoded = Vec::new();
+    GROUP_BASEPOINT.serialize(&mut generator_encoded).unwrap();
+    shake.input(generator_encoded);
 
     let mut reader = shake.xof_result();
     let mut gens: Vec<GroupElement> = Vec::new();
     let mut uniform_bytes = [0u8; 64];
     for _ in 0..n + 1 {
+      let mut el_aff: Option<GroupElementAffine> = None;
+      while el_aff.is_some() != true {
        reader.read_exact(&mut uniform_bytes).unwrap();
-      gens.push(GroupElement::from_random_bytes(&uniform_bytes));
+        el_aff = GroupElementAffine::from_random_bytes(&uniform_bytes);
+      }
+      let el = el_aff.unwrap().mul_by_cofactor_to_projective();
+      gens.push(el);
     }
 
     MultiCommitGens {
@@ -74,13 +84,15 @@ impl Commitments for Scalar {
 impl Commitments for Vec<Scalar> {
   fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
     assert_eq!(gens_n.n, self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h
+    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
+
   }
 }
 
 impl Commitments for [Scalar] {
   fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
     assert_eq!(gens_n.n, self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h
+    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
+
   }
 }
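The new generator derivation replaces ristretto's uniform decoding with rejection sampling: most 64-byte strings do not decode to a BLS12-377 point, so the loop redraws from the XOF until one does, then clears the cofactor. A standalone sketch of the same pattern (illustrative only, using `rand` in place of the Shake256 reader):

```rust
use ark_bls12_377::{G1Affine, G1Projective};
use ark_ec::AffineCurve;
use rand::RngCore;

fn main() {
    let mut rng = rand::thread_rng();
    let mut bytes = [0u8; 64];
    let gen: G1Projective = loop {
        rng.fill_bytes(&mut bytes);
        // Most draws miss the curve and return None; keep sampling.
        if let Some(p) = G1Affine::from_random_bytes(&bytes) {
            // Multiply by the cofactor to land in the prime-order subgroup.
            break p.mul_by_cofactor_to_projective();
        }
    };
    println!("sampled generator: {:?}", gen);
}
```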
@@ -1,7 +1,7 @@
 #![allow(clippy::too_many_arguments)]
 use super::commitments::{Commitments, MultiCommitGens};
 use super::errors::ProofVerifyError;
-use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
+use super::group::{GroupElement, CompressedGroup, VartimeMultiscalarMul, CompressGroupElement, DecompressGroupElement};
 use super::math::Math;
 use super::nizk::{DotProductProofGens, DotProductProofLog};
 use super::random::RandomTape;
@@ -217,7 +217,7 @@ impl DensePolynomial {
   pub fn bound_poly_var_top(&mut self, r: &Scalar) {
     let n = self.len() / 2;
     for i in 0..n {
-      self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]);
+      self.Z[i] = self.Z[i] + (self.Z[i + n] - self.Z[i]) * r;
     }
     self.num_vars -= 1;
     self.len = n;
@@ -226,7 +226,7 @@ impl DensePolynomial {
   pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
     let n = self.len() / 2;
     for i in 0..n {
-      self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]);
+      self.Z[i] = self.Z[2 * i] + (self.Z[2 * i + 1] - self.Z[2 * i]) * r;
     }
     self.num_vars -= 1;
     self.len = n;
@@ -379,9 +379,9 @@ impl PolyEvalProof {
     let (L, R) = eq.compute_factored_evals();
 
     // compute a weighted sum of commitments and L
-    let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap());
+    let C_decompressed = comm.C.iter().map(|pt| GroupElement::decompress(pt).unwrap()).collect::<Vec<GroupElement>>();
 
-    let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed).compress();
+    let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed.as_slice()).compress();
 
     self
       .proof
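The operand swaps in `bound_poly_var_top`/`bound_poly_var_bot` are not cosmetic: with `r: &Scalar`, ark_ff's field types provide `value * &reference` but no `&reference * value`, so the scalar moves to the right-hand side. A small sketch of the allowed form (illustrative, not from the commit):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let (a, b) = (Scalar::rand(&mut rng), Scalar::rand(&mut rng));
    let r: &Scalar = &Scalar::rand(&mut rng);
    // (b - a) * r compiles (reference on the right); r * (b - a) would not.
    let bound = a + (b - a) * r;
    println!("{:?}", bound);
}
```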
@@ -6,13 +6,13 @@ pub enum ProofVerifyError {
   #[error("Proof verification failed")]
   InternalError,
   #[error("Compressed group element failed to decompress: {0:?}")]
-  DecompressionError([u8; 32]),
+  DecompressionError(Vec<u8>),
 }
 
 impl Default for ProofVerifyError {
   fn default() -> Self {
     ProofVerifyError::InternalError
   }
 }
 
 #[derive(Clone, Debug, Eq, PartialEq)]
src/group.rs | 173 lines changed
@@ -1,131 +1,84 @@
+use ark_bls12_377::FrParameters;
+use ark_ec::group::Group;
+use ark_ec::{
+  msm::VariableBaseMSM,
+};
+use ark_ff::{PrimeField, Fp256, Zero};
+use digest::DynDigest;
 use lazy_static::lazy_static;
+use num_bigint::BigInt;
+use crate::errors::ProofVerifyError;
 
 use super::scalar::{Scalar};
 use core::borrow::Borrow;
 use core::ops::{Mul, MulAssign};
 use ark_ec::{ProjectiveCurve, AffineCurve};
+use ark_serialize::*;
 
-pub use ark_bls12_377::G1Projective as GroupElement;
-pub use ark_bls12_377::G1Affine as AffineGroupElement;
+pub type GroupElement = ark_bls12_377::G1Projective;
+pub type GroupElementAffine = ark_bls12_377::G1Affine;
 
+#[derive(Clone, Eq, PartialEq, Hash, Debug, CanonicalSerialize, CanonicalDeserialize)]
+pub struct CompressedGroup(pub Vec<u8>);
 
-// pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;
-
-// pub trait CompressedGroupExt {
-//   type Group;
-//   fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
-// }
-
-// what I should prolly do is implement compression and decompression operation on the GroupAffine
-
-// impl CompressedGroupExt for CompressedGroup {
-//   type Group = curve25519_dalek::ristretto::RistrettoPoint;
-//   fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
-//     self
-//       .decompress()
-//       .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
-//   }
-// }
-
-// ????
 lazy_static! {
   pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator();
 }
 
-// impl<'b> MulAssign<&'b Scalar> for GroupElement {
-//   fn mul_assign(&mut self, scalar: &'b Scalar) {
-//     let result = (self as &GroupElement).mul( scalar.into_repr());
-//     *self = result;
-//   }
-// }
+pub trait CompressGroupElement {
+  fn compress(&self) -> CompressedGroup;
+}
 
-// // This game happens because dalek works with scalars as bytes representation but we want people to have an easy life and not care about this
-// impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
-//   type Output = GroupElement;
-//   fn mul(self, scalar: &'b Scalar) -> GroupElement {
-//     self * Scalar::into_repr(scalar)
-//   }
-// }
+pub trait DecompressGroupElement {
+  fn decompress(encoded: &CompressedGroup) -> Option<GroupElement>;
+}
 
-// impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
-//   type Output = GroupElement;
-//   fn mul(self, point: &'b GroupElement) -> GroupElement {
-//     Scalar::into_repr(self) * point
-//   }
-// }
+pub trait UnpackGroupElement {
+  fn unpack(&self) -> Result<GroupElement, ProofVerifyError>;
+}
 
-// macro_rules! define_mul_variants {
-//   (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
-//     impl<'b> Mul<&'b $rhs> for $lhs {
-//       type Output = $out;
-//       fn mul(self, rhs: &'b $rhs) -> $out {
-//         &self * rhs
-//       }
-//     }
+impl CompressGroupElement for GroupElement {
+  fn compress(&self) -> CompressedGroup {
+    let mut point_encoding = Vec::new();
+    self.serialize(&mut point_encoding).unwrap();
+    // println!("in compress {:?}", point_encoding);;
+    CompressedGroup(point_encoding)
+  }
+}
 
-//     impl<'a> Mul<$rhs> for &'a $lhs {
-//       type Output = $out;
-//       fn mul(self, rhs: $rhs) -> $out {
-//         self * &rhs
-//       }
-//     }
+impl DecompressGroupElement for GroupElement {
+  fn decompress(encoded: &CompressedGroup) -> Option<Self>
+  {
+    let res = GroupElement::deserialize(&*encoded.0);
+    if res.is_err() {
+      println!("{:?}", res);
+      None
+    } else {
+      Some(res.unwrap())
+    }
+  }
+}
 
-//     impl Mul<$rhs> for $lhs {
-//       type Output = $out;
-//       fn mul(self, rhs: $rhs) -> $out {
-//         &self * &rhs
-//       }
-//     }
-//   };
-// }
+impl UnpackGroupElement for CompressedGroup {
+  fn unpack(&self) -> Result<GroupElement, ProofVerifyError> {
+    let encoded = self.0.clone();
+    GroupElement::decompress(self).ok_or_else(|| ProofVerifyError::DecompressionError(encoded))
+  }
+}
 
-// macro_rules! define_mul_assign_variants {
-//   (LHS = $lhs:ty, RHS = $rhs:ty) => {
-//     impl MulAssign<$rhs> for $lhs {
-//       fn mul_assign(&mut self, rhs: $rhs) {
-//         *self *= &rhs;
-//       }
-//     }
-//   };
-// }
+pub trait VartimeMultiscalarMul {
+  fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement;
+}
 
-// define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
-// define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
-// define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
+impl VartimeMultiscalarMul for GroupElement {
+  fn vartime_multiscalar_mul(
+    scalars: &[Scalar],
+    points: &[GroupElement],
+  ) -> GroupElement{
+    let repr_scalars= scalars.into_iter().map(|S| S.borrow().into_repr()).collect::<Vec<<Scalar as PrimeField>::BigInt>>();
+    let aff_points = points.into_iter().map(|P| P.borrow().into_affine()).collect::<Vec<GroupElementAffine>>();
+    VariableBaseMSM::multi_scalar_mul(aff_points.as_slice(), repr_scalars.as_slice())
+  }
+}
 
-// TODO
-// pub trait VartimeMultiscalarMul {
-//   type Scalar;
-//   fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-//   where
-//     I: IntoIterator,
-//     I::Item: Borrow<Self::Scalar>,
-//     J: IntoIterator,
-//     J::Item: Borrow<Self>,
-//     Self: Clone;
-// }
-
-// impl VartimeMultiscalarMul for GroupElement {
-//   type Scalar = super::scalar::Scalar;
-//   fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-//   where
-//     I: IntoIterator,
-//     I::Item: Borrow<Self::Scalar>,
-//     J: IntoIterator,
-//     J::Item: Borrow<Self>,
-//     Self: Clone,
-//   {
-//     // use curve25519_dalek::traits::VartimeMultiscalarMul;
-//     <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
-//       scalars
-//         .into_iter()
-//         .map(|s| Scalar::into_repr(s.borrow()))
-//         .collect::<Vec<Scalar>>(),
-//       points,
-//     )
-//   }
-// }
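Taken together, the new `group.rs` expresses compression as `CanonicalSerialize` into a byte vector and decompression as validated deserialization. A sketch of how the rest of the codebase drives these traits (assumes it sits in the same module as the definitions above):

```rust
use ark_ec::ProjectiveCurve;

fn roundtrip() {
    let g = GroupElement::prime_subgroup_generator();
    // compress() = CanonicalSerialize into CompressedGroup(Vec<u8>)
    let c: CompressedGroup = g.compress();
    // decompress() = CanonicalDeserialize, returning None on invalid encodings
    let back = GroupElement::decompress(&c).unwrap();
    assert_eq!(g, back);
}
```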
src/lib.rs | 88 lines changed
@@ -6,11 +6,11 @@
 
 extern crate byteorder;
 extern crate core;
-extern crate curve25519_dalek;
 extern crate digest;
 extern crate merlin;
 extern crate sha3;
 extern crate test;
+extern crate rand;
 extern crate lazy_static;
 extern crate ark_std;
 
@@ -34,7 +34,8 @@ mod timer;
 mod transcript;
 mod unipoly;
 
-use core::cmp::max;
+use core::{cmp::max};
+use std::borrow::Borrow;
 use errors::{ProofVerifyError, R1CSError};
 use merlin::Transcript;
 use r1csinstance::{
@@ -44,6 +45,8 @@ use r1csproof::{R1CSGens, R1CSProof};
 use random::RandomTape;
 use scalar::Scalar;
 use ark_serialize::*;
+use ark_ff::{PrimeField, Field, BigInteger};
+use ark_std::{One, Zero, UniformRand};
 use timer::Timer;
 use transcript::{AppendToTranscript, ProofTranscript};
 
@@ -65,12 +68,12 @@ pub struct Assignment {
 
 impl Assignment {
   /// Constructs a new `Assignment` from a vector
-  pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
-    let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<Scalar>, R1CSError> {
+  pub fn new(assignment: &Vec<Vec<u8>>) -> Result<Assignment, R1CSError> {
+    let bytes_to_scalar = |vec: &Vec<Vec<u8>>| -> Result<Vec<Scalar>, R1CSError> {
       let mut vec_scalar: Vec<Scalar> = Vec::new();
       for v in vec {
-        let val = Scalar::from_bytes(v);
-        if val.is_some().unwrap_u8() == 1 {
+        let val = Scalar::from_random_bytes(v.as_slice());
+        if val.is_some() == true {
          vec_scalar.push(val.unwrap());
        } else {
          return Err(R1CSError::InvalidScalar);
@@ -115,6 +118,7 @@ pub type VarsAssignment = Assignment;
 pub type InputsAssignment = Assignment;
 
 /// `Instance` holds the description of R1CS matrices
+#[derive(Debug)]
 pub struct Instance {
   inst: R1CSInstance,
 }
@@ -125,9 +129,9 @@ impl Instance {
     num_cons: usize,
     num_vars: usize,
     num_inputs: usize,
-    A: &[(usize, usize, [u8; 32])],
-    B: &[(usize, usize, [u8; 32])],
-    C: &[(usize, usize, [u8; 32])],
+    A: &[(usize, usize, Vec<u8>)],
+    B: &[(usize, usize, Vec<u8>)],
+    C: &[(usize, usize, Vec<u8>)],
   ) -> Result<Instance, R1CSError> {
     let (num_vars_padded, num_cons_padded) = {
       let num_vars_padded = {
@@ -162,27 +166,27 @@ impl Instance {
     };
 
     let bytes_to_scalar =
-      |tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
+      |tups: & [(usize, usize, Vec<u8>)]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
        let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
-        for &(row, col, val_bytes) in tups {
+        for (row, col, val_bytes) in tups {
          // row must be smaller than num_cons
-          if row >= num_cons {
+          if *row >= num_cons {
            return Err(R1CSError::InvalidIndex);
          }
 
          // col must be smaller than num_vars + 1 + num_inputs
-          if col >= num_vars + 1 + num_inputs {
+          if *col >= num_vars + 1 + num_inputs {
            return Err(R1CSError::InvalidIndex);
          }
 
-          let val = Scalar::from_bytes(&val_bytes);
-          if val.is_some().unwrap_u8() == 1 {
+          let val = Scalar::from_random_bytes(&val_bytes.as_slice());
+          if val.is_some() == true {
            // if col >= num_vars, it means that it is referencing a 1 or input in the satisfying
            // assignment
-            if col >= num_vars {
-              mat.push((row, col + num_vars_padded - num_vars, val.unwrap()));
+            if *col >= num_vars {
+              mat.push((*row, *col + num_vars_padded - num_vars, val.unwrap()));
            } else {
-              mat.push((row, col, val.unwrap()));
+              mat.push((*row, *col, val.unwrap()));
            }
          } else {
            return Err(R1CSError::InvalidScalar);
@@ -376,7 +380,8 @@ impl SNARK {
       )
     };
 
-    let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
+    let mut proof_encoded: Vec<u8> = Vec::new();
+    proof.serialize(&mut proof_encoded).unwrap();
     Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
 
     (proof, rx, ry)
@@ -405,7 +410,8 @@ impl SNARK {
         &mut random_tape,
       );
 
-      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
+      let mut proof_encoded: Vec<u8> = Vec::new();
+      proof.serialize(&mut proof_encoded).unwrap();
       Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
       proof
     };
@@ -532,7 +538,8 @@ impl NIZK {
       transcript,
       &mut random_tape,
     );
-    let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
+    let mut proof_encoded = Vec::new();
+    proof.serialize(&mut proof_encoded).unwrap();
     Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
     (proof, rx, ry)
   };
@@ -588,6 +595,7 @@ impl NIZK {
 #[cfg(test)]
 mod tests {
   use super::*;
+  use ark_ff::{PrimeField};
 
   #[test]
   pub fn check_snark() {
@@ -634,9 +642,9 @@ mod tests {
       0,
     ];
 
-    let A = vec![(0, 0, zero)];
-    let B = vec![(100, 1, zero)];
-    let C = vec![(1, 1, zero)];
+    let A = vec![(0, 0, zero.to_vec())];
+    let B = vec![(100, 1, zero.to_vec())];
+    let C = vec![(1, 1, zero.to_vec())];
 
     let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
     assert!(inst.is_err());
@@ -659,9 +667,9 @@ mod tests {
       57, 51, 72, 125, 157, 41, 83, 167, 237, 115,
     ];
 
-    let A = vec![(0, 0, zero)];
-    let B = vec![(1, 1, larger_than_mod)];
-    let C = vec![(1, 1, zero)];
+    let A = vec![(0, 0, zero.to_vec())];
+    let B = vec![(1, 1, larger_than_mod.to_vec())];
+    let C = vec![(1, 1, zero.to_vec())];
 
     let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
     assert!(inst.is_err());
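This test still passes after the migration because `from_random_bytes` plays the role of dalek's canonicity check: it returns `None` for byte strings encoding a value at or above the scalar-field modulus. A minimal illustration (an assumption-laden sketch, not from the commit):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::Field;

fn main() {
    // 32 bytes of 0xFF encode a value above the ~253-bit BLS12-377 scalar
    // modulus, so parsing is rejected and Instance::new reports InvalidScalar.
    let larger_than_mod = [0xFFu8; 32];
    assert!(Scalar::from_random_bytes(&larger_than_mod).is_none());
}
```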
@@ -678,25 +686,25 @@ mod tests {
 
     // We will encode the above constraints into three matrices, where
     // the coefficients in the matrix are in the little-endian byte order
-    let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
-    let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
-    let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
+    let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+    let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
+    let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
 
     // Create a^2 + b + 13
-    A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
-    B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
-    C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z
-    C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1
-    C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b
+    A.push((0, num_vars + 2, (Scalar::one().into_repr().to_bytes_le()))); // 1*a
+    B.push((0, num_vars + 2, Scalar::one().into_repr().to_bytes_le())); // 1*a
+    C.push((0, num_vars + 1, Scalar::one().into_repr().to_bytes_le())); // 1*z
+    C.push((0, num_vars, (-Scalar::from(13u64)).into_repr().to_bytes_le())); // -13*1
+    C.push((0, num_vars + 3, (-Scalar::one()).into_repr().to_bytes_le())); // -1*b
 
     // Var Assignments (Z_0 = 16 is the only output)
-    let vars = vec![Scalar::zero().to_bytes(); num_vars];
+    let vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
 
     // create an InputsAssignment (a = 1, b = 2)
-    let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
-    inputs[0] = Scalar::from(16u64).to_bytes();
-    inputs[1] = Scalar::from(1u64).to_bytes();
-    inputs[2] = Scalar::from(2u64).to_bytes();
+    let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
+    inputs[0] = Scalar::from(16u64).into_repr().to_bytes_le();
+    inputs[1] = Scalar::from(1u64).into_repr().to_bytes_le();
+    inputs[2] = Scalar::from(2u64).into_repr().to_bytes_le();
 
     let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
     let assignment_vars = VarsAssignment::new(&vars).unwrap();
@@ -4,10 +4,11 @@
 #![allow(clippy::type_complexity)]
 #![allow(clippy::too_many_arguments)]
 use super::super::errors::ProofVerifyError;
-use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
+use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul, CompressGroupElement, DecompressGroupElement};
 use super::super::scalar::Scalar;
 use super::super::transcript::ProofTranscript;
 use core::iter;
+use std::ops::MulAssign;
 use merlin::Transcript;
 use ark_serialize::*;
 use ark_ff::{Field, fields};
@@ -85,16 +86,16 @@ impl BulletReductionProof {
       a_L
         .iter()
         .chain(iter::once(&c_L))
-        .chain(iter::once(blind_L)),
-      G_R.iter().chain(iter::once(Q)).chain(iter::once(H)),
+        .chain(iter::once(blind_L)).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
+      G_R.iter().chain(iter::once(Q)).chain(iter::once(H)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
     );
 
     let R = GroupElement::vartime_multiscalar_mul(
       a_R
         .iter()
         .chain(iter::once(&c_R))
-        .chain(iter::once(blind_R)),
-      G_L.iter().chain(iter::once(Q)).chain(iter::once(H)),
+        .chain(iter::once(blind_R)).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
+      G_L.iter().chain(iter::once(Q)).chain(iter::once(H)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
     );
 
     transcript.append_point(b"L", &L.compress());
@@ -109,7 +110,7 @@ impl BulletReductionProof {
       G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
     }
 
-    blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;
+    blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R;
 
     L_vec.push(L.compress());
     R_vec.push(R.compress());
@@ -159,8 +160,14 @@ impl BulletReductionProof {
     }
 
     // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
-    let mut challenges_inv = challenges.clone();
-    let allinv = ark_ff::fields::batch_inversion(&mut challenges_inv);
+    let mut challenges_inv: Vec<Scalar> = challenges.clone();
+    ark_ff::fields::batch_inversion(&mut challenges_inv);
+
+    let mut allinv: Scalar = Scalar::one();
+    for c in challenges.iter().filter(|s| !s.is_zero()) {
+      allinv.mul_assign(c);
+    }
+    allinv = allinv.inverse().unwrap();
 
     // 3. Compute u_i^2 and (1/u_i)^2
     for i in 0..lg_n {
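dalek's `batch_invert` returned the product of all the inverses; ark_ff's `batch_inversion` inverts a slice in place and returns nothing, so `allinv` is recomputed by multiplying the (non-zero) challenges and performing one extra inversion. A sketch of the equivalence (illustrative only):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::{fields::batch_inversion, Field, One};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let challenges: Vec<Scalar> = (0..4).map(|_| Scalar::rand(&mut rng)).collect();

    let mut challenges_inv = challenges.clone();
    batch_inversion(&mut challenges_inv); // in place; zeros are skipped

    // allinv = (u_1 * ... * u_k)^{-1}, recomputed with a single inversion
    let prod = challenges.iter().fold(Scalar::one(), |acc, c| acc * c);
    let allinv = prod.inverse().unwrap();

    let prod_of_inv = challenges_inv.iter().fold(Scalar::one(), |acc, c| acc * c);
    assert_eq!(allinv, prod_of_inv);
}
```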
@@ -202,24 +209,24 @@ impl BulletReductionProof {
     let Ls = self
       .L_vec
       .iter()
-      .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
+      .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
       .collect::<Result<Vec<_>, _>>()?;
 
     let Rs = self
       .R_vec
       .iter()
-      .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
+      .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
       .collect::<Result<Vec<_>, _>>()?;
 
-    let G_hat = GroupElement::vartime_multiscalar_mul(s.iter(), G.iter());
+    let G_hat = GroupElement::vartime_multiscalar_mul(s.as_slice(), G);
     let a_hat = inner_product(a, &s);
 
     let Gamma_hat = GroupElement::vartime_multiscalar_mul(
       u_sq
         .iter()
         .chain(u_inv_sq.iter())
-        .chain(iter::once(&Scalar::one())),
-      Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)),
+        .chain(iter::once(&Scalar::one())).map(|s| *s).collect::<Vec<Scalar>>().as_slice(),
+      Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)).map(|p| *p).collect::<Vec<GroupElement>>().as_slice(),
     );
 
     Ok((G_hat, Gamma_hat, a_hat))

src/nizk/mod.rs | 144 lines changed
@@ -1,12 +1,16 @@
 #![allow(clippy::too_many_arguments)]
 use super::commitments::{Commitments, MultiCommitGens};
 use super::errors::ProofVerifyError;
-use super::group::{CompressedGroup, CompressedGroupExt};
+use super::group::{
+  CompressedGroup, CompressGroupElement, UnpackGroupElement, GroupElement, DecompressGroupElement, GroupElementAffine};
 use super::random::RandomTape;
 use super::scalar::Scalar;
 use super::transcript::{AppendToTranscript, ProofTranscript};
+use ark_ec::group::Group;
 use merlin::Transcript;
 use ark_serialize::*;
+use ark_ec::ProjectiveCurve;
+use ark_ff::PrimeField;
 
 mod bullet;
 use bullet::BulletReductionProof;
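With `CompressedGroupExt` gone, the `UnpackGroupElement` trait imported here supplies the same ergonomics: a fallible conversion from wire bytes back to a group element with a typed error. A hypothetical usage sketch (helper name is illustrative, not in the commit):

```rust
// How verification code converts a received CompressedGroup back into a
// GroupElement, assuming the group.rs traits from this commit are in scope.
fn unpack_point(c: &CompressedGroup) -> Result<GroupElement, ProofVerifyError> {
    // unpack() = GroupElement::decompress + DecompressionError on failure
    c.unpack()
}
```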
@@ -44,8 +48,8 @@ impl KnowledgeProof {
 
     let c = transcript.challenge_scalar(b"c");
 
-    let z1 = x * c + t1;
-    let z2 = r * c + t2;
+    let z1 = c * x + t1;
+    let z2 = c * r + t2;
 
     (KnowledgeProof { alpha, z1, z2 }, C)
   }
@@ -54,7 +58,8 @@ impl KnowledgeProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    C: &CompressedGroup,
+    C: &
+    CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(KnowledgeProof::protocol_name());
     C.append_to_transcript(b"C", transcript);
@@ -63,7 +68,7 @@ impl KnowledgeProof {
     let c = transcript.challenge_scalar(b"c");
 
     let lhs = self.z1.commit(&self.z2, gens_n).compress();
-    let rhs = (c * C.unpack()? + self.alpha.unpack()?).compress();
+    let rhs = ( C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress();
 
     if lhs == rhs {
       Ok(())
@@ -75,7 +80,8 @@ impl KnowledgeProof {
 
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct EqualityProof {
-  alpha: CompressedGroup,
+  alpha:
+  CompressedGroup,
   z: Scalar,
 }
 
@@ -92,7 +98,9 @@ impl EqualityProof {
     s1: &Scalar,
     v2: &Scalar,
     s2: &Scalar,
-  ) -> (EqualityProof, CompressedGroup, CompressedGroup) {
+  ) -> (EqualityProof,
+  CompressedGroup,
+  CompressedGroup) {
     transcript.append_protocol_name(EqualityProof::protocol_name());
 
     // produce a random Scalar
@@ -104,12 +112,12 @@ impl EqualityProof {
     let C2 = v2.commit(s2, gens_n).compress();
     C2.append_to_transcript(b"C2", transcript);
 
-    let alpha = (r * gens_n.h).compress();
+    let alpha = gens_n.h.mul(r.into_repr()).compress();
     alpha.append_to_transcript(b"alpha", transcript);
 
     let c = transcript.challenge_scalar(b"c");
 
-    let z = c * (s1 - s2) + r;
+    let z = c * ((*s1) - s2) + r;
 
     (EqualityProof { alpha, z }, C1, C2)
   }
@@ -118,8 +126,10 @@ impl EqualityProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    C1: &CompressedGroup,
-    C2: &CompressedGroup,
+    C1: &
+    CompressedGroup,
+    C2: &
+    CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(EqualityProof::protocol_name());
     C1.append_to_transcript(b"C1", transcript);
@@ -129,11 +139,12 @@ impl EqualityProof {
     let c = transcript.challenge_scalar(b"c");
     let rhs = {
       let C = C1.unpack()? - C2.unpack()?;
-      (c * C + self.alpha.unpack()?).compress()
+      (C.mul(c.into_repr()) + self.alpha.unpack()?).compress()
     };
+    println!("rhs {:?}", rhs);
 
-    let lhs = (self.z * gens_n.h).compress();
+    let lhs = gens_n.h.mul(self.z.into_repr()).compress();
+    println!("lhs {:?}", lhs);
     if lhs == rhs {
       Ok(())
     } else {
@@ -144,10 +155,13 @@ impl EqualityProof {
 
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct ProductProof {
-  alpha: CompressedGroup,
-  beta: CompressedGroup,
-  delta: CompressedGroup,
-  z: [Scalar; 5],
+  alpha:
+  CompressedGroup,
+  beta:
+  CompressedGroup,
+  delta:
+  CompressedGroup,
+  z: Vec<Scalar>,
 }
 
 impl ProductProof {
@@ -167,8 +181,11 @@ impl ProductProof {
     rZ: &Scalar,
   ) -> (
     ProductProof,
+
     CompressedGroup,
+
     CompressedGroup,
+
     CompressedGroup,
   ) {
     transcript.append_protocol_name(ProductProof::protocol_name());
@@ -180,9 +197,17 @@ impl ProductProof {
     let b4 = random_tape.random_scalar(b"b4");
     let b5 = random_tape.random_scalar(b"b5");
 
-    let X = x.commit(rX, gens_n).compress();
+    let X_unc = x.commit(rX, gens_n);
+
+
+    let X = X_unc.compress();
     X.append_to_transcript(b"X", transcript);
 
+    let X_new = GroupElement::decompress(&X);
+
+    assert_eq!(X_unc, X_new.unwrap());
+
+
     let Y = y.commit(rY, gens_n).compress();
     Y.append_to_transcript(b"Y", transcript);
 
@@ -198,7 +223,7 @@ impl ProductProof {
     let delta = {
       let gens_X = &MultiCommitGens {
         n: 1,
-        G: vec![X.decompress().unwrap()],
+        G: vec![GroupElement::decompress(&X).unwrap()],
         h: gens_n.h,
       };
       b3.commit(&b5, gens_X).compress()
@@ -211,8 +236,8 @@ impl ProductProof {
     let z2 = b2 + c * rX;
     let z3 = b3 + c * y;
     let z4 = b4 + c * rY;
-    let z5 = b5 + c * (rZ - rX * y);
-    let z = [z1, z2, z3, z4, z5];
+    let z5 = b5 + c * ((*rZ) - (*rX) * y);
+    let z = [z1, z2, z3, z4, z5].to_vec();
 
     (
       ProductProof {
@@ -228,14 +253,17 @@ impl ProductProof {
   }
 
   fn check_equality(
-    P: &CompressedGroup,
-    X: &CompressedGroup,
+    P: &
+    CompressedGroup,
+    X: &
+    CompressedGroup,
     c: &Scalar,
     gens_n: &MultiCommitGens,
     z1: &Scalar,
     z2: &Scalar,
   ) -> bool {
-    let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
+    println!("{:?}", X);
+    let lhs = (GroupElement::decompress(P).unwrap() + GroupElement::decompress(X).unwrap().mul(c.into_repr())).compress();
     let rhs = z1.commit(z2, gens_n).compress();
 
     lhs == rhs
@@ -245,9 +273,12 @@ impl ProductProof {
     &self,
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
-    X: &CompressedGroup,
-    Y: &CompressedGroup,
-    Z: &CompressedGroup,
+    X: &
+    CompressedGroup,
+    Y: &
+    CompressedGroup,
+    Z: &
+    CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     transcript.append_protocol_name(ProductProof::protocol_name());
 
@@ -290,8 +321,10 @@ impl ProductProof {
 
 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DotProductProof {
-  delta: CompressedGroup,
-  beta: CompressedGroup,
+  delta:
+  CompressedGroup,
+  beta:
+  CompressedGroup,
   z: Vec<Scalar>,
   z_delta: Scalar,
   z_beta: Scalar,
@@ -317,7 +350,9 @@ impl DotProductProof {
     a_vec: &[Scalar],
     y: &Scalar,
     blind_y: &Scalar,
-  ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
+  ) -> (DotProductProof,
+  CompressedGroup,
+  CompressedGroup) {
     transcript.append_protocol_name(DotProductProof::protocol_name());
 
     let n = x_vec.len();
@@ -374,8 +409,10 @@ impl DotProductProof {
     gens_n: &MultiCommitGens,
     transcript: &mut Transcript,
     a: &[Scalar],
-    Cx: &CompressedGroup,
-    Cy: &CompressedGroup,
+    Cx: &
+    CompressedGroup,
+    Cy: &
+    CompressedGroup,
   ) -> Result<(), ProofVerifyError> {
     assert_eq!(gens_n.n, a.len());
     assert_eq!(gens_1.n, 1);
@@ -390,11 +427,10 @@ impl DotProductProof {
|
|||||||
let c = transcript.challenge_scalar(b"c");
|
let c = transcript.challenge_scalar(b"c");
|
||||||
|
|
||||||
let mut result =
|
let mut result =
|
||||||
c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
|
Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
|
||||||
|
|
||||||
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
|
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
|
||||||
result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
|
result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
|
||||||
|
|
||||||
if result {
|
if result {
|
||||||
Ok(())
|
Ok(())
|
||||||
} else {
|
} else {
|
||||||
@@ -419,8 +455,10 @@ impl DotProductProofGens {
|
|||||||
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
|
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
|
||||||
pub struct DotProductProofLog {
|
pub struct DotProductProofLog {
|
||||||
bullet_reduction_proof: BulletReductionProof,
|
bullet_reduction_proof: BulletReductionProof,
|
||||||
delta: CompressedGroup,
|
delta:
|
||||||
beta: CompressedGroup,
|
CompressedGroup,
|
||||||
|
beta:
|
||||||
|
CompressedGroup,
|
||||||
z1: Scalar,
|
z1: Scalar,
|
||||||
z2: Scalar,
|
z2: Scalar,
|
||||||
}
|
}
|
||||||
@@ -444,7 +482,9 @@ impl DotProductProofLog {
|
|||||||
a_vec: &[Scalar],
|
a_vec: &[Scalar],
|
||||||
y: &Scalar,
|
y: &Scalar,
|
||||||
blind_y: &Scalar,
|
blind_y: &Scalar,
|
||||||
) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
|
) -> (DotProductProofLog,
|
||||||
|
CompressedGroup,
|
||||||
|
CompressedGroup) {
|
||||||
transcript.append_protocol_name(DotProductProofLog::protocol_name());
|
transcript.append_protocol_name(DotProductProofLog::protocol_name());
|
||||||
|
|
||||||
let n = x_vec.len();
|
let n = x_vec.len();
|
||||||
@@ -471,7 +511,7 @@ impl DotProductProofLog {
|
|||||||
|
|
||||||
a_vec.append_to_transcript(b"a", transcript);
|
a_vec.append_to_transcript(b"a", transcript);
|
||||||
|
|
||||||
let blind_Gamma = blind_x + blind_y;
|
let blind_Gamma = (*blind_x) + blind_y;
|
||||||
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
|
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
|
||||||
BulletReductionProof::prove(
|
BulletReductionProof::prove(
|
||||||
transcript,
|
transcript,
|
||||||
@@ -522,8 +562,10 @@ impl DotProductProofLog {
|
|||||||
gens: &DotProductProofGens,
|
gens: &DotProductProofGens,
|
||||||
transcript: &mut Transcript,
|
transcript: &mut Transcript,
|
||||||
a: &[Scalar],
|
a: &[Scalar],
|
||||||
Cx: &CompressedGroup,
|
Cx: &
|
||||||
Cy: &CompressedGroup,
|
CompressedGroup,
|
||||||
|
Cy: &
|
||||||
|
CompressedGroup,
|
||||||
) -> Result<(), ProofVerifyError> {
|
) -> Result<(), ProofVerifyError> {
|
||||||
assert_eq!(gens.n, n);
|
assert_eq!(gens.n, n);
|
||||||
assert_eq!(a.len(), n);
|
assert_eq!(a.len(), n);
|
||||||
@@ -551,8 +593,8 @@ impl DotProductProofLog {
|
|||||||
let z1_s = &self.z1;
|
let z1_s = &self.z1;
|
||||||
let z2_s = &self.z2;
|
let z2_s = &self.z2;
|
||||||
|
|
||||||
let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
|
let lhs = ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s).compress();
|
||||||
let rhs = ((g_hat + gens.gens_1.G[0] * a_hat_s) * z1_s + gens.gens_1.h * z2_s).compress();
|
let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr()) + gens.gens_1.h.mul(z2_s.into_repr())).compress();
|
||||||
|
|
||||||
assert_eq!(lhs, rhs);
|
assert_eq!(lhs, rhs);
|
||||||
|
|
||||||
@@ -566,7 +608,13 @@ impl DotProductProofLog {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
|
use crate::group::VartimeMultiscalarMul;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
use ark_bls12_377::{G1Affine, Fq, FqParameters};
|
||||||
|
use ark_ff::{Fp384, BigInteger384};
|
||||||
use ark_std::{UniformRand};
|
use ark_std::{UniformRand};
|
||||||
#[test]
|
#[test]
|
||||||
fn check_knowledgeproof() {
|
fn check_knowledgeproof() {
|
||||||
@@ -615,10 +663,14 @@ use ark_std::{UniformRand};
|
|||||||
.verify(&gens_1, &mut verifier_transcript, &C1, &C2)
|
.verify(&gens_1, &mut verifier_transcript, &C1, &C2)
|
||||||
.is_ok());
|
.is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn check_productproof() {
|
fn check_productproof() {
|
||||||
let mut rng = ark_std::rand::thread_rng();
|
let mut rng = ark_std::rand::thread_rng();
|
||||||
|
let pt = GroupElement::rand(&mut rng);
|
||||||
|
let pt_c = pt.compress();
|
||||||
|
let pt2 = GroupElement::decompress(&pt_c).unwrap();
|
||||||
|
assert_eq!(pt, pt2);
|
||||||
|
|
||||||
let gens_1 = MultiCommitGens::new(1, b"test-productproof");
|
let gens_1 = MultiCommitGens::new(1, b"test-productproof");
|
||||||
let x = Scalar::rand(&mut rng);
|
let x = Scalar::rand(&mut rng);
|
||||||
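The new assertions at the top of `check_productproof` sanity-check the compression round trip on a random point. An equivalent standalone check, assuming (as the call sites suggest) that `compress`/`decompress` wrap ark-serialize's compressed affine encoding:

```rust
use ark_bls12_377::{G1Affine, G1Projective};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let pt = G1Projective::rand(&mut rng);

    // compress: canonical (compressed) serialization of the affine form
    let mut bytes = Vec::new();
    pt.into_affine().serialize(&mut bytes).unwrap();

    // decompress: deserialize and lift back to projective coordinates
    let pt2 = G1Affine::deserialize(&mut bytes.as_slice())
        .unwrap()
        .into_projective();
    assert_eq!(pt, pt2);
}
```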
@@ -186,7 +186,7 @@ impl ProductCircuitEvalProof {
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|
-     -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
+     -> Scalar { (*poly_A_comp) * poly_B_comp * poly_C_comp };
     let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
       &claim,
       num_rounds_prod,
@@ -283,7 +283,7 @@ impl ProductCircuitEvalProofBatched {
     let comb_func_prod = |poly_A_comp: &Scalar,
                           poly_B_comp: &Scalar,
                           poly_C_comp: &Scalar|
-     -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
+     -> Scalar { (*poly_A_comp) * poly_B_comp * poly_C_comp };

     let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
     let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
@@ -455,7 +455,7 @@ impl ProductCircuitEvalProofBatched {

       claims_to_verify = (0..claims_prod_left.len())
         .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
-        .collect::<Vec<Scalar>>();
+        .collect();

       // add claims to verify for dotp circuit
       if i == num_layers - 1 {
@@ -10,7 +10,6 @@ use super::sparse_mlpoly::{
   SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
 };
 use super::timer::Timer;
-use flate2::{write::ZlibEncoder, Compression};
 use merlin::Transcript;
 use ark_serialize::*;
 use ark_std::{One, Zero, UniformRand};
@@ -28,9 +27,8 @@ pub struct R1CSInstance {

 impl AppendToTranscript for R1CSInstance {
   fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
-    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
-    bincode::serialize_into(&mut encoder, &self).unwrap();
-    let bytes = encoder.finish().unwrap();
+    let mut bytes = Vec::new();
+    self.serialize(&mut bytes).unwrap();
     transcript.append_message(b"R1CSInstance", &bytes);
   }
 }
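With this change the transcript absorbs the raw `CanonicalSerialize` bytes instead of a zlib-compressed bincode encoding, which also drops the `flate2` dependency. A sketch of the new pattern as a generic helper (`append_serializable` is hypothetical, not part of the crate):

```rust
use ark_serialize::CanonicalSerialize;
use merlin::Transcript;

// append any CanonicalSerialize value to a Merlin transcript,
// mirroring what R1CSInstance::append_to_transcript now does inline
fn append_serializable<T: CanonicalSerialize>(
    transcript: &mut Transcript,
    label: &'static [u8],
    value: &T,
) {
    let mut bytes = Vec::new();
    value.serialize(&mut bytes).unwrap();
    transcript.append_message(label, &bytes);
}
```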
@@ -1,10 +1,12 @@
 #![allow(clippy::too_many_arguments)]
+use crate::group::CompressedGroup;
+
 use super::commitments::{Commitments, MultiCommitGens};
 use super::dense_mlpoly::{
   DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
 };
 use super::errors::ProofVerifyError;
-use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
+use super::group::{GroupElement, VartimeMultiscalarMul, CompressGroupElement, DecompressGroupElement};
 use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
 use super::r1csinstance::R1CSInstance;
 use super::random::RandomTape;
@@ -14,19 +16,21 @@ use super::sumcheck::ZKSumcheckInstanceProof;
 use super::timer::Timer;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use core::iter;
+use ark_ff::PrimeField;
 use merlin::Transcript;
 use ark_serialize::*;
 use ark_std::{Zero, One};
+use ark_ec::{ProjectiveCurve};

 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct R1CSProof {
   comm_vars: PolyCommitment,
   sc_proof_phase1: ZKSumcheckInstanceProof,
   claims_phase2: (
     CompressedGroup,
     CompressedGroup,
     CompressedGroup,
     CompressedGroup,
   ),
   pok_claims_phase2: (KnowledgeProof, ProductProof),
   proof_eq_sc_phase1: EqualityProof,
@@ -86,7 +90,7 @@ impl R1CSProof {
                     poly_B_comp: &Scalar,
                     poly_C_comp: &Scalar,
                     poly_D_comp: &Scalar|
-     -> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };
+     -> Scalar { (*poly_A_comp) * ((*poly_B_comp) * poly_C_comp - poly_D_comp) };

     let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
       ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
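The `(*poly_A_comp)` dereferences in these closures exist because dalek's `Scalar` implements the arithmetic operators for reference operands, while ark-ff 0.3's field types evidently need an owned left operand. A small illustration of the pattern:

```rust
use ark_bls12_377::Fr as Scalar;
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let (a, b, c) = (
        Scalar::rand(&mut rng),
        Scalar::rand(&mut rng),
        Scalar::rand(&mut rng),
    );

    // closure over references, as in the sum-check combining functions:
    // deref the left operand so the owned `Mul<&Fr>` impl applies
    let comb = |x: &Scalar, y: &Scalar, z: &Scalar| -> Scalar { (*x) * y * z };
    assert_eq!(comb(&a, &b, &c), a * b * c);
}
```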
@@ -118,7 +122,7 @@ impl R1CSProof {
     random_tape: &mut RandomTape,
   ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
     let comb_func =
-      |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
+      |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { (*poly_A_comp) * poly_B_comp };
     let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
       claim,
       blind_claim,
@@ -227,7 +231,7 @@ impl R1CSProof {
     };

     let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
-      let prod = Az_claim * Bz_claim;
+      let prod = (*Az_claim) * Bz_claim;
       ProductProof::prove(
         &gens.gens_sc.gens_1,
         transcript,
@@ -248,8 +252,8 @@ impl R1CSProof {

     // prove the final step of sum-check #1
     let taus_bound_rx = tau_claim;
-    let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
-    let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
+    let blind_expected_claim_postsc1 = (prod_Az_Bz_blind - Cz_blind) * taus_bound_rx;
+    let claim_post_phase1 = ((*Az_claim) * Bz_claim - Cz_claim) * taus_bound_rx;
     let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
       &gens.gens_sc.gens_1,
       transcript,
@@ -404,8 +408,7 @@ impl R1CSProof {
     let taus_bound_rx: Scalar = (0..rx.len())
       .map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
       .product();
-    let expected_claim_post_phase1 = (taus_bound_rx
-      * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
+    let expected_claim_post_phase1 = (GroupElement::decompress(comm_prod_Az_Bz_claims).unwrap() - GroupElement::decompress(comm_Cz_claim).unwrap()).mul(taus_bound_rx.into_repr())
     .compress();

     // verify proof that expected_claim_post_phase1 == claim_post_phase1
@@ -425,12 +428,12 @@ impl R1CSProof {
     let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
       iter::once(&r_A)
         .chain(iter::once(&r_B))
-        .chain(iter::once(&r_C)),
+        .chain(iter::once(&r_C)).map(|s| (*s)).collect::<Vec<Scalar>>().as_slice(),
       iter::once(&comm_Az_claim)
         .chain(iter::once(&comm_Bz_claim))
         .chain(iter::once(&comm_Cz_claim))
-        .map(|pt| pt.decompress().unwrap())
-        .collect::<Vec<GroupElement>>(),
+        .map(|pt| GroupElement::decompress(pt).unwrap())
+        .collect::<Vec<GroupElement>>().as_slice(),
     )
     .compress();

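The `.collect::<Vec<_>>().as_slice()` chains here (and in sumcheck.rs below) reflect that the ported `VartimeMultiscalarMul` takes slices of owned scalars and points rather than dalek's iterator-of-borrows API. A naive stand-in with the same shape, for reference only (the exact trait in this commit is not shown here, and a real implementation should use a proper MSM algorithm):

```rust
use ark_bls12_377::{Fr, G1Projective};
use ark_ec::ProjectiveCurve;
use ark_ff::{PrimeField, Zero};

// slice-based multiscalar multiplication matching the new call sites;
// computes sum_i scalars[i] * points[i] the slow way
fn vartime_multiscalar_mul(scalars: &[Fr], points: &[G1Projective]) -> G1Projective {
    assert_eq!(scalars.len(), points.len());
    scalars
        .iter()
        .zip(points)
        .map(|(s, p)| p.mul(s.into_repr()))
        .fold(G1Projective::zero(), |acc, term| acc + term)
}
```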
@@ -468,16 +471,17 @@ impl R1CSProof {

     // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
     let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
-      iter::once(Scalar::one() - ry[0]).chain(iter::once(ry[0])),
-      iter::once(&self.comm_vars_at_ry.decompress().unwrap()).chain(iter::once(
-        &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
-      )),
+      iter::once(Scalar::one() - ry[0]).chain(iter::once(ry[0])).collect::<Vec<Scalar>>().as_slice(),
+      iter::once(GroupElement::decompress(&self.comm_vars_at_ry).unwrap()).chain(iter::once(
+        poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
+      )).collect::<Vec<GroupElement>>().as_slice(),
     );

     // perform the final check in the second sum-check protocol
     let (eval_A_r, eval_B_r, eval_C_r) = evals;
+    let scalar = r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r;
     let expected_claim_post_phase2 =
-      ((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
+      comm_eval_Z_at_ry.mul(scalar.into_repr()).compress();
     // verify proof that expected_claim_post_phase1 == claim_post_phase1
     self.proof_eq_sc_phase2.verify(
       &gens.gens_sc.gens_1,
@@ -15,7 +15,7 @@ use super::transcript::{AppendToTranscript, ProofTranscript};
 use core::cmp::Ordering;
 use merlin::Transcript;
 use ark_serialize::*;
-use ark_ff::{One, Zero};
+use ark_ff::{One, Zero, Field};

 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SparseMatEntry {
@@ -466,9 +466,9 @@ impl SparseMatPolynomial {
         let row = self.M[i].row;
         let col = self.M[i].col;
         let val = &self.M[i].val;
-        (row, val * z[col])
+        (row, z[col] * val)
       })
       .fold(vec![Scalar::zero(); num_rows], |mut Mz, (r, v)| {
         Mz[r] += v;
         Mz
       })
@@ -553,9 +553,9 @@ impl Layers {
     let (r_hash, r_multiset_check) = r_mem_check;

     //hash(addr, val, ts) = ts * r_hash_sqr + val * r_hash + addr
-    let r_hash_sqr = r_hash * r_hash;
+    let r_hash_sqr = r_hash.square();
     let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar {
-      ts * r_hash_sqr + val * r_hash + addr
+      r_hash_sqr * ts + (*val) * r_hash + addr
     };

     // hash init and audit that does not depend on #instances
@@ -856,9 +856,9 @@ impl HashLayerProof {
     r_hash: &Scalar,
     r_multiset_check: &Scalar,
   ) -> Result<(), ProofVerifyError> {
-    let r_hash_sqr = r_hash * r_hash;
+    let r_hash_sqr = r_hash.square();
     let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar {
-      ts * r_hash_sqr + val * r_hash + addr
+      r_hash_sqr * ts + (*val) * r_hash + addr
     };

     let (rand_mem, _rand_ops) = rand;
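`r_hash * r_hash` becomes `Field::square` (hence the `Field` import added to sparse_mlpoly above); squaring has a dedicated and typically faster routine in ark-ff. For instance:

```rust
use ark_bls12_377::Fr;
use ark_ff::Field;
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let r = Fr::rand(&mut rng);
    // Field::square computes r * r with a specialized squaring routine
    assert_eq!(r.square(), r * r);
}
```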
@@ -1220,7 +1220,8 @@ impl ProductLayerProof {
       proof_ops,
     };

-    let product_layer_proof_encoded: Vec<u8> = bincode::serialize(&product_layer_proof).unwrap();
+    let mut product_layer_proof_encoded: Vec<u8> = Vec::new();
+    product_layer_proof.serialize(&mut product_layer_proof_encoded).unwrap();
     let msg = format!(
       "len_product_layer_proof {:?}",
       product_layer_proof_encoded.len()
@@ -1259,7 +1260,7 @@ impl ProductLayerProof {
       .map(|i| row_eval_write[i])
       .product();
     let rs: Scalar = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product();
-    assert_eq!(row_eval_init * ws, rs * row_eval_audit);
+    assert_eq!( ws * row_eval_init , rs * row_eval_audit);

     row_eval_init.append_to_transcript(b"claim_row_eval_init", transcript);
     row_eval_read.append_to_transcript(b"claim_row_eval_read", transcript);
@@ -1274,7 +1275,7 @@ impl ProductLayerProof {
       .map(|i| col_eval_write[i])
       .product();
     let rs: Scalar = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product();
-    assert_eq!(col_eval_init * ws, rs * col_eval_audit);
+    assert_eq!(ws * col_eval_init, rs * col_eval_audit);

     col_eval_init.append_to_transcript(b"claim_col_eval_init", transcript);
     col_eval_read.append_to_transcript(b"claim_col_eval_read", transcript);
@@ -1628,7 +1629,8 @@ impl SparsePolynomial {
 mod tests {
   use super::*;
   use ark_std::{UniformRand};
   use rand::RngCore;

   #[test]
   fn check_sparse_polyeval_proof() {
     let mut rng = ark_std::rand::thread_rng();
@@ -1643,8 +1645,8 @@ use ark_std::{UniformRand};

     for _i in 0..num_nz_entries {
       M.push(SparseMatEntry::new(
-        (csprng.next_u64() % (num_rows as u64)) as usize,
-        (csprng.next_u64() % (num_cols as u64)) as usize,
+        (rng.next_u64()% (num_rows as u64)) as usize,
+        (rng.next_u64() % (num_cols as u64)) as usize,
         Scalar::rand(&mut rng),
       ));
     }
@@ -3,7 +3,7 @@
 use super::commitments::{Commitments, MultiCommitGens};
 use super::dense_mlpoly::DensePolynomial;
 use super::errors::ProofVerifyError;
-use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
+use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul, CompressGroupElement, DecompressGroupElement};
 use super::nizk::DotProductProof;
 use super::random::RandomTape;
 use super::scalar::Scalar;
@@ -115,7 +115,7 @@ impl ZKSumcheckInstanceProof {
     } else {
       &self.comm_evals[i - 1]
     };
-    let comm_eval = &self.comm_evals[i];
+    let mut comm_eval = &self.comm_evals[i];

     // add two claims to transcript
     comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
@@ -126,11 +126,11 @@ impl ZKSumcheckInstanceProof {

     // compute a weighted sum of the RHS
     let comm_target = GroupElement::vartime_multiscalar_mul(
-      w.iter(),
+      w.as_slice(),
       iter::once(&comm_claim_per_round)
         .chain(iter::once(&comm_eval))
-        .map(|pt| pt.decompress().unwrap())
-        .collect::<Vec<GroupElement>>(),
+        .map(|pt| GroupElement::decompress(pt).unwrap())
+        .collect::<Vec<GroupElement>>().as_slice(),
     )
     .compress();

@@ -176,7 +176,7 @@ impl ZKSumcheckInstanceProof {
       r.push(r_i);
     }

-    Ok((self.comm_evals[self.comm_evals.len() - 1], r))
+    Ok((self.comm_evals[&self.comm_evals.len() - 1].clone(), r))
   }
 }

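The `.clone()` added here (and to the `comm_evals.push` calls below) is forced by types: dalek's `CompressedRistretto` is `Copy`, while the arkworks-backed `CompressedGroup` evidently is not, so moving a value out of the vector no longer compiles. A toy illustration with a hypothetical non-Copy wrapper:

```rust
// hypothetical stand-in for the crate's CompressedGroup
#[derive(Clone, Debug, PartialEq)]
struct CompressedGroup(Vec<u8>);

fn main() {
    let comm_evals = vec![CompressedGroup(vec![1, 2, 3])];
    // without Copy, reading an element out of the Vec requires a clone
    let last = comm_evals[comm_evals.len() - 1].clone();
    assert_eq!(last, comm_evals[comm_evals.len() - 1]);
}
```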
@@ -510,11 +510,11 @@ impl ZKSumcheckInstanceProof {
     // compute a weighted sum of the RHS
     let target = w[0] * claim_per_round + w[1] * eval;
     let comm_target = GroupElement::vartime_multiscalar_mul(
-      w.iter(),
+      w.as_slice(),
       iter::once(&comm_claim_per_round)
         .chain(iter::once(&comm_eval))
-        .map(|pt| pt.decompress().unwrap())
-        .collect::<Vec<GroupElement>>(),
+        .map(|pt| GroupElement::decompress(pt).unwrap())
+        .collect::<Vec<GroupElement>>().as_slice(),
     )
     .compress();

@@ -575,7 +575,7 @@ impl ZKSumcheckInstanceProof {

       proofs.push(proof);
       r.push(r_j);
-      comm_evals.push(comm_claim_per_round);
+      comm_evals.push(comm_claim_per_round.clone());
     }

     (
@@ -693,20 +693,21 @@ impl ZKSumcheckInstanceProof {
       // add two claims to transcript
       comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
       comm_eval.append_to_transcript(b"comm_eval", transcript);


       // produce two weights
       let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

       // compute a weighted sum of the RHS
       let target = w[0] * claim_per_round + w[1] * eval;

       let comm_target = GroupElement::vartime_multiscalar_mul(
-        w.iter(),
+        w.as_slice(),
         iter::once(&comm_claim_per_round)
           .chain(iter::once(&comm_eval))
-          .map(|pt| pt.decompress().unwrap())
-          .collect::<Vec<GroupElement>>(),
-      )
-      .compress();
+          .map(|pt|GroupElement::decompress(&pt).unwrap())
+          .collect::<Vec<GroupElement>>().as_slice(),
+      ).compress();

       let blind = {
         let blind_sc = if j == 0 {
@@ -720,7 +721,10 @@ impl ZKSumcheckInstanceProof {
         w[0] * blind_sc + w[1] * blind_eval
       };

-      assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
+      let res = target.commit(&blind, gens_1);
+
+      assert_eq!(res.compress(), comm_target);

       let a = {
         // the vector to use to decommit for sum-check test
@@ -765,7 +769,7 @@ impl ZKSumcheckInstanceProof {
       claim_per_round = claim_next_round;
       comm_claim_per_round = comm_claim_next_round;
       r.push(r_j);
-      comm_evals.push(comm_claim_per_round);
+      comm_evals.push(comm_claim_per_round.clone());
     }

     (
@@ -1,6 +1,8 @@
-use super::group::CompressedGroup;
+use crate::group::CompressedGroup;
 use super::scalar::Scalar;
 use merlin::Transcript;
+use ark_ff::{PrimeField, BigInteger};
+use ark_serialize::{CanonicalSerialize};

 pub trait ProofTranscript {
   fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
@@ -16,17 +18,19 @@ impl ProofTranscript for Transcript {
   }

   fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
-    self.append_message(label, &scalar.to_bytes());
+    self.append_message(label, &scalar.into_repr().to_bytes_le().as_slice());
   }

   fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
-    self.append_message(label, point.as_bytes());
+    let mut point_encoded = Vec::new();
+    point.serialize(&mut point_encoded).unwrap();
+    self.append_message(label, point_encoded.as_slice());
   }

   fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
     let mut buf = [0u8; 64];
     self.challenge_bytes(label, &mut buf);
-    Scalar::from_bytes_wide(&buf)
+    Scalar::from_le_bytes_mod_order(&buf)
   }

   fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
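Summing up the transcript changes: scalars are absorbed as the little-endian bytes of their `BigInteger` representation, points as their canonical serialization, and challenges are drawn by reducing 64 uniform bytes modulo the field order. A condensed sketch of the scalar paths, using `Fr` directly in place of the crate's `Scalar` alias:

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::{BigInteger, PrimeField};
use merlin::Transcript;

fn main() {
    let mut transcript = Transcript::new(b"example");

    // append a scalar: little-endian bytes of its BigInteger representation
    let s = Scalar::from(42u64);
    transcript.append_message(b"s", &s.into_repr().to_bytes_le());

    // derive a challenge: 64 bytes reduced mod the field order, replacing
    // dalek's Scalar::from_bytes_wide
    let mut buf = [0u8; 64];
    transcript.challenge_bytes(b"c", &mut buf);
    let _c = Scalar::from_le_bytes_mod_order(&buf);
}
```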
@@ -97,7 +97,7 @@ impl CompressedUniPoly {
   // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
   pub fn decompress(&self, hint: &Scalar) -> UniPoly {
     let mut linear_term =
-      hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
+      (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
     for i in 1..self.coeffs_except_linear_term.len() {
       linear_term -= self.coeffs_except_linear_term[i];
     }