initial commit

This commit is contained in:
Georgios Konstantopoulos
2021-07-26 12:45:07 +03:00
commit b64f038283
46 changed files with 4299 additions and 0 deletions

115
src/circom_qap.rs Normal file

@@ -0,0 +1,115 @@
use ark_ff::PrimeField;
use ark_groth16::r1cs_to_qap::{evaluate_constraint, QAPCalculator, R1CStoQAP};
use ark_poly::EvaluationDomain;
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
use core::ops::Deref;
/// Implements the witness map used by snarkjs. The arkworks witness map calculates the
/// coefficients of H by computing (AB-C)/Z in the evaluation domain and interpolating back
/// to the coefficient domain. snarkjs instead precomputes the Lagrange form of the powers-of-tau
/// bases over a domain twice as large, and the witness map is computed as the odd coefficients
/// of (AB-C) in that domain. These serve as H*Z when computing the C element of the proof.
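///
/// A minimal usage sketch, mirroring the test in `src/zkey.rs` (assumes a Circom circuit
/// `circom`, its proving key `params` and an `rng` are already in scope):
///
/// ```ignore
/// use ark_groth16::create_random_proof_with_qap as prove;
/// let proof = prove::<_, _, _, R1CStoQAPCircom>(circom, &params, &mut rng)?;
/// ```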
pub struct R1CStoQAPCircom;
impl QAPCalculator for R1CStoQAPCircom {
#[allow(clippy::type_complexity)]
fn instance_map_with_evaluation<F: PrimeField, D: EvaluationDomain<F>>(
cs: ConstraintSystemRef<F>,
t: &F,
) -> Result<(Vec<F>, Vec<F>, Vec<F>, F, usize, usize), SynthesisError> {
R1CStoQAP::instance_map_with_evaluation::<F, D>(cs, t)
}
fn witness_map<F: PrimeField, D: EvaluationDomain<F>>(
prover: ConstraintSystemRef<F>,
) -> Result<Vec<F>, SynthesisError> {
let matrices = prover.to_matrices().unwrap();
let zero = F::zero();
let num_inputs = prover.num_instance_variables();
let num_constraints = prover.num_constraints();
let cs = prover.borrow().unwrap();
let prover = cs.deref();
let full_assignment = [
prover.instance_assignment.as_slice(),
prover.witness_assignment.as_slice(),
]
.concat();
let domain =
D::new(num_constraints + num_inputs).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
let domain_size = domain.size();
let mut a = vec![zero; domain_size];
let mut b = vec![zero; domain_size];
cfg_iter_mut!(a[..num_constraints])
.zip(cfg_iter_mut!(b[..num_constraints]))
.zip(cfg_iter!(&matrices.a))
.zip(cfg_iter!(&matrices.b))
.for_each(|(((a, b), at_i), bt_i)| {
*a = evaluate_constraint(&at_i, &full_assignment);
*b = evaluate_constraint(&bt_i, &full_assignment);
});
{
let start = num_constraints;
let end = start + num_inputs;
a[start..end].clone_from_slice(&full_assignment[..num_inputs]);
}
domain.ifft_in_place(&mut a);
domain.ifft_in_place(&mut b);
let root_of_unity = {
let domain_size_double = 2 * domain_size;
let domain_double =
D::new(domain_size_double).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
domain_double.element(1)
};
D::distribute_powers_and_mul_by_const(&mut a, root_of_unity, F::one());
D::distribute_powers_and_mul_by_const(&mut b, root_of_unity, F::one());
domain.fft_in_place(&mut a);
domain.fft_in_place(&mut b);
let mut ab = domain.mul_polynomials_in_evaluation_domain(&a, &b);
drop(a);
drop(b);
let mut c = vec![zero; domain_size];
cfg_iter_mut!(c[..prover.num_constraints])
.enumerate()
.for_each(|(i, c)| {
*c = evaluate_constraint(&matrices.c[i], &full_assignment);
});
domain.ifft_in_place(&mut c);
D::distribute_powers_and_mul_by_const(&mut c, root_of_unity, F::one());
domain.fft_in_place(&mut c);
cfg_iter_mut!(ab)
.zip(c)
.for_each(|(ab_i, c_i)| *ab_i -= &c_i);
Ok(ab)
}
fn h_query_scalars<F: PrimeField, D: EvaluationDomain<F>>(
max_power: usize,
t: F,
_: F,
delta_inverse: F,
) -> Result<Vec<F>, SynthesisError> {
// the usual H query has domain-1 powers. Z has domain powers. So HZ has 2*domain-1 powers.
let mut scalars = cfg_into_iter!(0..2 * max_power + 1)
.map(|i| delta_inverse * t.pow([i as u64]))
.collect::<Vec<_>>();
let domain_size = scalars.len();
let domain = D::new(domain_size).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
// generate the Lagrange coefficients
domain.ifft_in_place(&mut scalars);
Ok(cfg_into_iter!(scalars).skip(1).step_by(2).collect())
}
}

79
src/circom_wasm/circom.rs Normal file

@@ -0,0 +1,79 @@
use color_eyre::Result;
use wasmer::{Function, Instance, Value};
#[derive(Clone, Debug)]
pub struct CircomInstance(Instance);
// Bindings to the functions exported by the Circom-generated WASM module
impl CircomInstance {
pub fn new(instance: Instance) -> Self {
CircomInstance(instance)
}
pub fn init(&self, sanity_check: bool) -> Result<()> {
let func = self.func("init");
func.call(&[Value::I32(sanity_check as i32)])?;
Ok(())
}
pub fn get_fr_len(&self) -> Result<i32> {
self.get_i32("getFrLen")
}
pub fn get_ptr_raw_prime(&self) -> Result<i32> {
self.get_i32("getPRawPrime")
}
pub fn get_n_vars(&self) -> Result<i32> {
self.get_i32("getNVars")
}
pub fn get_ptr_witness_buffer(&self) -> Result<i32> {
self.get_i32("getWitnessBuffer")
}
pub fn get_ptr_witness(&self, w: i32) -> Result<i32> {
let func = self.func("getPWitness");
let res = func.call(&[w.into()])?;
Ok(res[0].unwrap_i32())
}
pub fn get_signal_offset32(
&self,
p_sig_offset: u32,
component: u32,
hash_msb: u32,
hash_lsb: u32,
) -> Result<()> {
let func = self.func("getSignalOffset32");
func.call(&[
p_sig_offset.into(),
component.into(),
hash_msb.into(),
hash_lsb.into(),
])?;
Ok(())
}
pub fn set_signal(&self, c_idx: i32, component: i32, signal: i32, p_val: i32) -> Result<()> {
let func = self.func("setSignal");
func.call(&[c_idx.into(), component.into(), signal.into(), p_val.into()])?;
Ok(())
}
fn get_i32(&self, name: &str) -> Result<i32> {
let func = self.func(name);
let result = func.call(&[])?;
Ok(result[0].unwrap_i32())
}
fn func(&self, name: &str) -> &Function {
self.0
.exports
.get_function(name)
.unwrap_or_else(|_| panic!("function {} not found", name))
}
}

271
src/circom_wasm/memory.rs Normal file

@@ -0,0 +1,271 @@
//! Safe-ish interface for reading and writing specific types to the WASM runtime's memory
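//!
//! A minimal round-trip sketch, mirroring the tests below (`prime` is assumed to be the
//! BN254 scalar field modulus as a `BigInt`):
//!
//! ```ignore
//! let memory = Memory::new(&Store::default(), MemoryType::new(1, None, false))?;
//! let mut mem = SafeMemory::new(memory, 2, prime);
//! mem.write_fr(0, &BigInt::from(1_000_000))?;
//! assert_eq!(mem.read_fr(0)?, BigInt::from(1_000_000));
//! ```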
use num_traits::ToPrimitive;
use wasmer::{Memory, MemoryView};
// TODO: Decide whether we want Ark here or if it should use a generic BigInt package
use ark_bn254::FrParameters;
use ark_ff::{BigInteger, BigInteger256, FpParameters, FromBytes, Zero};
use num_bigint::{BigInt, BigUint};
use color_eyre::Result;
use std::str::FromStr;
use std::{convert::TryFrom, ops::Deref};
#[derive(Clone, Debug)]
pub struct SafeMemory {
pub memory: Memory,
pub prime: BigInt,
short_max: BigInt,
short_min: BigInt,
r_inv: BigInt,
n32: usize,
}
impl Deref for SafeMemory {
type Target = Memory;
fn deref(&self) -> &Self::Target {
&self.memory
}
}
impl SafeMemory {
/// Creates a new SafeMemory
pub fn new(memory: Memory, n32: usize, prime: BigInt) -> Self {
// TODO: Figure out a better way to calculate these
let short_max = BigInt::from(0x8000_0000u64);
let short_min = BigInt::from_biguint(
num_bigint::Sign::NoSign,
BigUint::try_from(FrParameters::MODULUS).unwrap(),
) - &short_max;
let r_inv = BigInt::from_str(
"9915499612839321149637521777990102151350674507940716049588462388200839649614",
)
.unwrap();
Self {
memory,
prime,
short_max,
short_min,
r_inv,
n32,
}
}
/// Gets an immutable view of the memory in u32 (4-byte) chunks
pub fn view(&self) -> MemoryView<u32> {
self.memory.view()
}
/// Returns the next free position in the memory
pub fn free_pos(&self) -> u32 {
self.view()[0].get()
}
/// Sets the next free position in the memory
pub fn set_free_pos(&mut self, ptr: u32) {
self.write_u32(0, ptr);
}
/// Allocates a U32 in memory
pub fn alloc_u32(&mut self) -> u32 {
let p = self.free_pos();
self.set_free_pos(p + 8);
p
}
/// Writes a u32 to the specified memory offset
pub fn write_u32(&mut self, ptr: usize, num: u32) {
let buf = unsafe { self.memory.data_unchecked_mut() };
buf[ptr..ptr + std::mem::size_of::<u32>()].copy_from_slice(&num.to_le_bytes());
}
/// Reads a u32 from the specified memory offset
pub fn read_u32(&self, ptr: usize) -> u32 {
let buf = unsafe { self.memory.data_unchecked() };
let mut bytes = [0; 4];
bytes.copy_from_slice(&buf[ptr..ptr + std::mem::size_of::<u32>()]);
u32::from_le_bytes(bytes)
}
/// Allocates `self.n32 * 4 + 8` bytes in the memory
pub fn alloc_fr(&mut self) -> u32 {
let p = self.free_pos();
self.set_free_pos(p + self.n32 as u32 * 4 + 8);
p
}
/// Writes a Field Element to memory at the specified offset, using the short u32
/// representation when the value fits and encoding the sign via two's complement
pub fn write_fr(&mut self, ptr: usize, fr: &BigInt) -> Result<()> {
if fr < &self.short_max && fr > &self.short_min {
if fr >= &BigInt::zero() {
self.write_short_positive(ptr, fr)?;
} else {
self.write_short_negative(ptr, fr)?;
}
} else {
self.write_long_normal(ptr, fr)?;
}
Ok(())
}
/// Reads a Field Element from the memory at the specified offset
pub fn read_fr(&self, ptr: usize) -> Result<BigInt> {
let view = self.memory.view::<u8>();
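// The runtime stores an Fr as two u32 words followed by an optional long value: if bit
// 31 of the word at `ptr + 4` is set, the value lives at `ptr + 8` (bit 30 additionally
// marks Montgomery form, undone below via `r_inv`); otherwise the word at `ptr` holds a
// short value, with bit 30 flagging a negative encoded in two's complement.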
let res = if view[ptr + 4 + 3].get() & 0x80 != 0 {
let mut num = self.read_big(ptr + 8, self.n32)?;
if view[ptr + 4 + 3].get() & 0x40 != 0 {
num = (num * &self.r_inv) % &self.prime
}
num
} else if view[ptr + 3].get() & 0x40 != 0 {
let mut num = self.read_u32(ptr).into();
// handle small negative
num -= BigInt::from(0x100000000i64);
num
} else {
self.read_u32(ptr).into()
};
Ok(res)
}
fn write_short_positive(&mut self, ptr: usize, fr: &BigInt) -> Result<()> {
let num = fr.to_i32().expect("not a short positive");
self.write_u32(ptr, num as u32);
self.write_u32(ptr + 4, 0);
Ok(())
}
fn write_short_negative(&mut self, ptr: usize, fr: &BigInt) -> Result<()> {
// 2s complement
let num = fr - &self.short_min;
let num = num - &self.short_max;
let num = num + BigInt::from(0x0001_0000_0000i64);
let num = num
.to_u32()
.expect("could not cast as u32 (should never happen)");
self.write_u32(ptr, num);
self.write_u32(ptr + 4, 0);
Ok(())
}
fn write_long_normal(&mut self, ptr: usize, fr: &BigInt) -> Result<()> {
self.write_u32(ptr, 0);
self.write_u32(ptr + 4, i32::MIN as u32); // 0x80000000
self.write_big(ptr + 8, fr)?;
Ok(())
}
fn write_big(&self, ptr: usize, num: &BigInt) -> Result<()> {
let buf = unsafe { self.memory.data_unchecked_mut() };
// TODO: How do we handle negative bignums?
let (_, num) = num.clone().into_parts();
let num = BigInteger256::try_from(num).unwrap();
let bytes = num.to_bytes_le();
let len = bytes.len();
buf[ptr..ptr + len].copy_from_slice(&bytes);
Ok(())
}
/// Reads `num_bytes * 32` bytes from the specified memory offset into a Big Integer
pub fn read_big(&self, ptr: usize, num_bytes: usize) -> Result<BigInt> {
let buf = unsafe { self.memory.data_unchecked() };
let buf = &buf[ptr..ptr + num_bytes * 32];
// TODO: Is there a better way to read big integers?
let big = BigInteger256::read(buf).unwrap();
let big = BigUint::try_from(big).unwrap();
Ok(big.into())
}
}
// TODO: Figure out how to read / write numbers > u32
// circom-witness-calculator: Wasm + Memory -> expose BigInts so that they can be consumed by any proof system
// ark-circom:
// 1. can read zkey
// 2. can generate witness from inputs
// 3. can generate proofs
// 4. can serialize proofs in the desired format
#[cfg(test)]
mod tests {
use super::*;
use num_traits::ToPrimitive;
use std::str::FromStr;
use wasmer::{MemoryType, Store};
fn new() -> SafeMemory {
SafeMemory::new(
Memory::new(&Store::default(), MemoryType::new(1, None, false)).unwrap(),
2,
BigInt::from_str(
"21888242871839275222246405745257275088548364400416034343698204186575808495617",
)
.unwrap(),
)
}
#[test]
fn i32_bounds() {
let mem = new();
let i32_max = i32::MAX as i64 + 1;
assert_eq!(mem.short_min.to_i64().unwrap(), -i32_max);
assert_eq!(mem.short_max.to_i64().unwrap(), i32_max);
}
#[test]
fn read_write_32() {
let mut mem = new();
let num = u32::MAX;
let inp = mem.read_u32(0);
assert_eq!(inp, 0);
mem.write_u32(0, num);
let inp = mem.read_u32(0);
assert_eq!(inp, num);
}
#[test]
fn read_write_fr_small_positive() {
read_write_fr(BigInt::from(1_000_000));
}
#[test]
fn read_write_fr_small_negative() {
read_write_fr(BigInt::from(-1_000_000));
}
#[test]
fn read_write_fr_big_positive() {
read_write_fr(BigInt::from(500000000000i64));
}
// TODO: How should this be handled?
#[test]
#[ignore]
fn read_write_fr_big_negative() {
read_write_fr(BigInt::from_str("-500000000000").unwrap())
}
fn read_write_fr(num: BigInt) {
let mut mem = new();
mem.write_fr(0, &num).unwrap();
let res = mem.read_fr(0).unwrap();
assert_eq!(res, num);
}
}

21
src/circom_wasm/mod.rs Normal file

@@ -0,0 +1,21 @@
mod wasm;
pub use wasm::WitnessCalculator;
mod memory;
pub use memory::SafeMemory;
mod circom;
pub use circom::CircomInstance;
use fnv::FnvHasher;
use std::hash::Hasher;
pub use num_bigint::BigInt;
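// Hashes the input with 64-bit FNV and splits the digest into
// (most significant 32 bits, least significant 32 bits), the form expected by the
// WASM module's `getSignalOffset32` export.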
pub(crate) fn fnv(inp: &str) -> (u32, u32) {
let mut hasher = FnvHasher::default();
hasher.write(inp.as_bytes());
let h = hasher.finish();
((h >> 32) as u32, h as u32)
}

274
src/circom_wasm/wasm.rs Normal file

@@ -0,0 +1,274 @@
use color_eyre::Result;
use num_bigint::BigInt;
use num_traits::Zero;
use std::cell::Cell;
use wasmer::{imports, Function, Instance, Memory, MemoryType, Module, Store};
use super::{fnv, CircomInstance, SafeMemory};
#[derive(Clone, Debug)]
pub struct WitnessCalculator {
pub instance: CircomInstance,
pub memory: SafeMemory,
pub n64: i32,
}
impl WitnessCalculator {
pub fn new(path: impl AsRef<std::path::Path>) -> Result<Self> {
let store = Store::default();
let module = Module::from_file(&store, path)?;
// Set up the memory
let memory = Memory::new(&store, MemoryType::new(2000, None, false)).unwrap();
let import_object = imports! {
"env" => {
"memory" => memory.clone(),
},
// Host function callbacks from the WASM
"runtime" => {
"error" => runtime::error(&store),
"logSetSignal" => runtime::log_signal(&store),
"logGetSignal" => runtime::log_signal(&store),
"logFinishComponent" => runtime::log_component(&store),
"logStartComponent" => runtime::log_component(&store),
"log" => runtime::log_component(&store),
}
};
let instance = CircomInstance::new(Instance::new(&module, &import_object)?);
let n32 = (instance.get_fr_len()? >> 2) - 2;
let mut memory = SafeMemory::new(memory, n32 as usize, BigInt::zero());
let ptr = instance.get_ptr_raw_prime()?;
let prime = memory.read_big(ptr as usize, n32 as usize)?;
let n64 = ((prime.bits() - 1) / 64 + 1) as i32;
memory.prime = prime;
Ok(WitnessCalculator {
instance,
memory,
n64,
})
}
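/// Calculates the witness for the given inputs. A minimal sketch, mirroring the tests
/// below (the path and input values are placeholders):
///
/// ```ignore
/// let mut calculator = WitnessCalculator::new("circuit.wasm")?;
/// let inputs = vec![("a".to_string(), vec![BigInt::from(3)])];
/// let witness = calculator.calculate_witness(inputs, false)?;
/// ```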
pub fn calculate_witness<I: IntoIterator<Item = (String, Vec<BigInt>)>>(
&mut self,
inputs: I,
sanity_check: bool,
) -> Result<Vec<BigInt>> {
let old_mem_free_pos = self.memory.free_pos();
self.instance.init(sanity_check)?;
let p_sig_offset = self.memory.alloc_u32();
let p_fr = self.memory.alloc_fr();
// allocate the inputs
for (name, values) in inputs.into_iter() {
let (msb, lsb) = fnv(&name);
self.instance
.get_signal_offset32(p_sig_offset, 0, msb, lsb)?;
let sig_offset = self.memory.read_u32(p_sig_offset as usize) as usize;
for (i, value) in values.into_iter().enumerate() {
self.memory.write_fr(p_fr as usize, &value)?;
self.instance
.set_signal(0, 0, (sig_offset + i) as i32, p_fr as i32)?;
}
}
let mut w = Vec::new();
let n_vars = self.instance.get_n_vars()?;
for i in 0..n_vars {
let ptr = self.instance.get_ptr_witness(i)? as usize;
let el = self.memory.read_fr(ptr)?;
w.push(el);
}
self.memory.set_free_pos(old_mem_free_pos);
Ok(w)
}
pub fn get_witness_buffer(&self) -> Result<Vec<u8>> {
let ptr = self.instance.get_ptr_witness_buffer()? as usize;
let view = self.memory.memory.view::<u8>();
let len = self.instance.get_n_vars()? * self.n64 * 8;
let arr = view[ptr..ptr + len as usize]
.iter()
.map(Cell::get)
.collect::<Vec<_>>();
Ok(arr)
}
}
// callback hooks for debugging
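// These match the host imports the Circom-generated WASM expects under the `runtime`
// namespace (see the `imports!` block above); here they are intentionally no-op stubs.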
mod runtime {
use super::*;
pub fn error(store: &Store) -> Function {
#[allow(unused)]
#[allow(clippy::many_single_char_names)]
fn func(a: i32, b: i32, c: i32, d: i32, e: i32, f: i32) {}
Function::new_native(&store, func)
}
pub fn log_signal(store: &Store) -> Function {
#[allow(unused)]
fn func(a: i32, b: i32) {}
Function::new_native(&store, func)
}
pub fn log_component(store: &Store) -> Function {
#[allow(unused)]
fn func(a: i32) {}
Function::new_native(&store, func)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{collections::HashMap, path::PathBuf};
struct TestCase<'a> {
circuit_path: &'a str,
inputs_path: &'a str,
n_vars: u32,
n64: u32,
witness: &'a [&'a str],
}
pub fn root_path(p: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push(p);
path.to_string_lossy().to_string()
}
#[test]
fn multiplier_1() {
run_test(TestCase {
circuit_path: root_path("test-vectors/mycircuit.wasm").as_str(),
inputs_path: root_path("test-vectors/mycircuit-input1.json").as_str(),
n_vars: 4,
n64: 4,
witness: &["1", "33", "3", "11"],
});
}
#[test]
fn multiplier_2() {
run_test(TestCase {
circuit_path: root_path("test-vectors/mycircuit.wasm").as_str(),
inputs_path: root_path("test-vectors/mycircuit-input2.json").as_str(),
n_vars: 4,
n64: 4,
witness: &[
"1",
"21888242871839275222246405745257275088548364400416034343698204186575672693159",
"21888242871839275222246405745257275088548364400416034343698204186575796149939",
"11",
],
});
}
#[test]
fn multiplier_3() {
run_test(TestCase {
circuit_path: root_path("test-vectors/mycircuit.wasm").as_str(),
inputs_path: root_path("test-vectors/mycircuit-input3.json").as_str(),
n_vars: 4,
n64: 4,
witness: &[
"1",
"21888242871839275222246405745257275088548364400416034343698204186575808493616",
"10944121435919637611123202872628637544274182200208017171849102093287904246808",
"2",
],
});
}
#[test]
fn safe_multiplier() {
let witness =
std::fs::read_to_string(&root_path("test-vectors/safe-circuit-witness.json")).unwrap();
let witness: Vec<String> = serde_json::from_str(&witness).unwrap();
let witness = &witness.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
run_test(TestCase {
circuit_path: root_path("test-vectors/circuit2.wasm").as_str(),
inputs_path: root_path("test-vectors/mycircuit-input1.json").as_str(),
n_vars: 132, // 128 + 4
n64: 4,
witness,
});
}
#[test]
fn smt_verifier() {
let witness =
std::fs::read_to_string(&root_path("test-vectors/smtverifier10-witness.json")).unwrap();
let witness: Vec<String> = serde_json::from_str(&witness).unwrap();
let witness = &witness.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
run_test(TestCase {
circuit_path: root_path("test-vectors/smtverifier10.wasm").as_str(),
inputs_path: root_path("test-vectors/smtverifier10-input.json").as_str(),
n_vars: 4794,
n64: 4,
witness,
});
}
use serde_json::Value;
use std::str::FromStr;
fn value_to_bigint(v: Value) -> BigInt {
match v {
Value::String(inner) => BigInt::from_str(&inner).unwrap(),
Value::Number(inner) => BigInt::from(inner.as_u64().expect("not a u64")),
_ => panic!("unsupported type"),
}
}
fn run_test(case: TestCase) {
let mut wtns = WitnessCalculator::new(case.circuit_path).unwrap();
assert_eq!(
wtns.memory.prime.to_str_radix(16),
"30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001".to_lowercase()
);
assert_eq!(wtns.instance.get_n_vars().unwrap() as u32, case.n_vars);
assert_eq!(wtns.n64 as u32, case.n64);
let inputs_str = std::fs::read_to_string(case.inputs_path).unwrap();
let inputs: std::collections::HashMap<String, serde_json::Value> =
serde_json::from_str(&inputs_str).unwrap();
let inputs = inputs
.iter()
.map(|(key, value)| {
let res = match value {
Value::String(inner) => {
vec![BigInt::from_str(inner).unwrap()]
}
Value::Number(inner) => {
vec![BigInt::from(inner.as_u64().expect("not a u64"))]
}
Value::Array(inner) => inner.iter().cloned().map(value_to_bigint).collect(),
_ => panic!(),
};
(key.clone(), res)
})
.collect::<HashMap<_, _>>();
let res = wtns.calculate_witness(inputs, false).unwrap();
for (r, w) in res.iter().zip(case.witness) {
assert_eq!(r, &BigInt::from_str(w).unwrap());
}
}
}

95
src/circuit/builder.rs Normal file

@@ -0,0 +1,95 @@
use ark_ec::PairingEngine;
use std::{fs::File, path::Path};
use super::{CircomCircuit, R1CS};
use num_bigint::BigInt;
use std::collections::HashMap;
use crate::{circuit::R1CSFile, WitnessCalculator};
use color_eyre::Result;
pub struct CircomBuilder<E: PairingEngine> {
pub cfg: CircuitConfig<E>,
pub inputs: HashMap<String, Vec<BigInt>>,
}
// TODO: Add utils for creating this from files / directly from bytes
pub struct CircuitConfig<E: PairingEngine> {
pub r1cs: R1CS<E>,
pub wtns: WitnessCalculator,
pub sanity_check: bool,
}
impl<E: PairingEngine> CircuitConfig<E> {
pub fn new(wtns: impl AsRef<Path>, r1cs: impl AsRef<Path>) -> Result<Self> {
let wtns = WitnessCalculator::new(wtns).unwrap();
let reader = File::open(r1cs)?;
let r1cs = R1CSFile::new(reader)?.into();
Ok(Self {
wtns,
r1cs,
sanity_check: false,
})
}
}
impl<E: PairingEngine> CircomBuilder<E> {
/// Instantiates a new builder using the provided circuit configuration
/// (witness calculator and R1CS) for your circuit
pub fn new(cfg: CircuitConfig<E>) -> Self {
Self {
cfg,
inputs: HashMap::new(),
}
}
/// Pushes a Circom input at the specified name.
pub fn push_input<T: Into<BigInt>>(&mut self, name: impl ToString, val: T) {
let values = self.inputs.entry(name.to_string()).or_insert_with(Vec::new);
values.push(val.into());
}
/// Generates an empty circom circuit with no witness set, to be used for
/// generation of the trusted setup parameters
pub fn setup(&self) -> CircomCircuit<E> {
let mut circom = CircomCircuit {
r1cs: self.cfg.r1cs.clone(),
witness: None,
};
// Disable the wire mapping
circom.r1cs.wire_mapping = None;
circom
}
/// Creates the circuit populated with the witness corresponding to the previously
/// provided inputs
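///
/// A minimal sketch, mirroring the test in `src/circuit/circom.rs` (paths shortened):
///
/// ```ignore
/// let cfg = CircuitConfig::<Bn254>::new("mycircuit.wasm", "mycircuit.r1cs")?;
/// let mut builder = CircomBuilder::new(cfg);
/// builder.push_input("a", 3);
/// builder.push_input("b", 11);
/// let circom = builder.build()?;
/// ```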
pub fn build(mut self) -> Result<CircomCircuit<E>> {
let mut circom = self.setup();
// calculate the witness
let witness = self
.cfg
.wtns
.calculate_witness(self.inputs, self.cfg.sanity_check)?;
// convert it to field elements
let witness = witness
.into_iter()
.map(|w| E::Fr::from(w.to_biguint().unwrap()))
.collect::<Vec<_>>();
circom.witness = Some(witness);
// sanity check
debug_assert!({
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
let cs = ConstraintSystem::<E::Fr>::new_ref();
circom.clone().generate_constraints(cs.clone()).unwrap();
cs.is_satisfied().unwrap()
});
Ok(circom)
}
}

107
src/circuit/circom.rs Normal file

@@ -0,0 +1,107 @@
use ark_ec::PairingEngine;
use ark_relations::r1cs::{
ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, SynthesisError, Variable,
};
use crate::circuit::R1CS;
use color_eyre::Result;
#[derive(Clone, Debug)]
pub struct CircomCircuit<E: PairingEngine> {
pub r1cs: R1CS<E>,
pub witness: Option<Vec<E::Fr>>,
}
impl<'a, E: PairingEngine> CircomCircuit<E> {
pub fn get_public_inputs(&self) -> Option<Vec<E::Fr>> {
match &self.witness {
None => None,
Some(w) => match &self.r1cs.wire_mapping {
None => Some(w[1..self.r1cs.num_inputs].to_vec()),
Some(m) => Some(m[1..self.r1cs.num_inputs].iter().map(|i| w[*i]).collect()),
},
}
}
}
impl<E: PairingEngine> ConstraintSynthesizer<E::Fr> for CircomCircuit<E> {
fn generate_constraints(self, cs: ConstraintSystemRef<E::Fr>) -> Result<(), SynthesisError> {
let witness = &self.witness;
let wire_mapping = &self.r1cs.wire_mapping;
// Start from 1 because Arkworks implicitly allocates One for the first input
for i in 1..self.r1cs.num_inputs {
cs.new_input_variable(|| {
Ok(match witness {
None => E::Fr::from(1u32),
Some(w) => match wire_mapping {
Some(m) => w[m[i]],
None => w[i],
},
})
})?;
}
for i in 0..self.r1cs.num_aux {
cs.new_witness_variable(|| {
Ok(match witness {
None => E::Fr::from(1u32),
Some(w) => match wire_mapping {
Some(m) => w[m[i + self.r1cs.num_inputs]],
None => w[i + self.r1cs.num_inputs],
},
})
})?;
}
let make_index = |index| {
if index < self.r1cs.num_inputs {
Variable::Instance(index)
} else {
Variable::Witness(index - self.r1cs.num_inputs)
}
};
let make_lc = |lc_data: &[(usize, E::Fr)]| {
lc_data.iter().fold(
LinearCombination::<E::Fr>::zero(),
|lc: LinearCombination<E::Fr>, (index, coeff)| lc + (*coeff, make_index(*index)),
)
};
for constraint in &self.r1cs.constraints {
cs.enforce_constraint(
make_lc(&constraint.0),
make_lc(&constraint.1),
make_lc(&constraint.2),
)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{CircomBuilder, CircuitConfig};
use ark_bn254::{Bn254, Fr};
use ark_relations::r1cs::ConstraintSystem;
#[test]
fn satisfied() {
let cfg = CircuitConfig::<Bn254>::new(
"./test-vectors/mycircuit.wasm",
"./test-vectors/mycircuit.r1cs",
)
.unwrap();
let mut builder = CircomBuilder::new(cfg);
builder.push_input("a", 3);
builder.push_input("b", 11);
let circom = builder.build().unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
circom.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap());
}
}

13
src/circuit/mod.rs Normal file

@@ -0,0 +1,13 @@
use ark_ec::PairingEngine;
pub mod r1cs_reader;
pub use r1cs_reader::{R1CSFile, R1CS};
mod circom;
pub use circom::CircomCircuit;
mod builder;
pub use builder::{CircomBuilder, CircuitConfig};
pub type Constraints<E> = (ConstraintVec<E>, ConstraintVec<E>, ConstraintVec<E>);
pub type ConstraintVec<E> = Vec<(usize, <E as PairingEngine>::Fr)>;

270
src/circuit/r1cs_reader.rs Normal file

@@ -0,0 +1,270 @@
//! R1CS circom file reader
//! Copied from https://github.com/poma/zkutil
use byteorder::{LittleEndian, ReadBytesExt};
use std::io::{Error, ErrorKind, Result};
use ark_ec::PairingEngine;
use ark_ff::FromBytes;
use ark_std::io::Read;
use crate::circuit::{ConstraintVec, Constraints};
#[derive(Clone, Debug)]
pub struct R1CS<E: PairingEngine> {
pub num_inputs: usize,
pub num_aux: usize,
pub num_variables: usize,
pub constraints: Vec<Constraints<E>>,
pub wire_mapping: Option<Vec<usize>>,
}
impl<E: PairingEngine> From<R1CSFile<E>> for R1CS<E> {
fn from(file: R1CSFile<E>) -> Self {
let num_inputs = (1 + file.header.n_pub_in + file.header.n_pub_out) as usize;
let num_variables = file.header.n_wires as usize;
let num_aux = num_variables - num_inputs;
R1CS {
num_aux,
num_inputs,
num_variables,
constraints: file.constraints,
wire_mapping: Some(file.wire_mapping.iter().map(|e| *e as usize).collect()),
}
}
}
pub struct R1CSFile<E: PairingEngine> {
pub version: u32,
pub header: Header,
pub constraints: Vec<Constraints<E>>,
pub wire_mapping: Vec<u64>,
}
impl<E: PairingEngine> R1CSFile<E> {
pub fn new<R: Read>(mut reader: R) -> Result<R1CSFile<E>> {
let mut magic = [0u8; 4];
reader.read_exact(&mut magic)?;
if magic != [0x72, 0x31, 0x63, 0x73] {
// magic = "r1cs"
return Err(Error::new(ErrorKind::InvalidData, "Invalid magic number"));
}
let version = reader.read_u32::<LittleEndian>()?;
if version != 1 {
return Err(Error::new(ErrorKind::InvalidData, "Unsupported version"));
}
let _num_sections = reader.read_u32::<LittleEndian>()?;
// todo: rewrite this to support different section order and unknown sections
// todo: handle sec_size correctly
let _sec_type = reader.read_u32::<LittleEndian>()?;
let sec_size = reader.read_u64::<LittleEndian>()?;
let header = Header::new(&mut reader, sec_size)?;
let _sec_type = reader.read_u32::<LittleEndian>()?;
let _sec_size = reader.read_u64::<LittleEndian>()?;
let constraints = read_constraints::<&mut R, E>(&mut reader, &header)?;
let _sec_type = reader.read_u32::<LittleEndian>()?;
let sec_size = reader.read_u64::<LittleEndian>()?;
let wire_mapping = read_map(&mut reader, sec_size, &header)?;
Ok(R1CSFile {
version,
header,
constraints,
wire_mapping,
})
}
}
pub struct Header {
pub field_size: u32,
pub prime_size: Vec<u8>,
pub n_wires: u32,
pub n_pub_out: u32,
pub n_pub_in: u32,
pub n_prv_in: u32,
pub n_labels: u64,
pub n_constraints: u32,
}
impl Header {
fn new<R: Read>(mut reader: R, size: u64) -> Result<Header> {
let field_size = reader.read_u32::<LittleEndian>()?;
if field_size != 32 {
return Err(Error::new(
ErrorKind::InvalidData,
"This parser only supports 32-byte fields",
));
}
if size != 32 + field_size as u64 {
return Err(Error::new(
ErrorKind::InvalidData,
"Invalid header section size",
));
}
let mut prime_size = vec![0u8; field_size as usize];
reader.read_exact(&mut prime_size)?;
if prime_size
!= hex::decode("010000f093f5e1439170b97948e833285d588181b64550b829a031e1724e6430")
.unwrap()
{
return Err(Error::new(
ErrorKind::InvalidData,
"This parser only supports bn256",
));
}
Ok(Header {
field_size,
prime_size,
n_wires: reader.read_u32::<LittleEndian>()?,
n_pub_out: reader.read_u32::<LittleEndian>()?,
n_pub_in: reader.read_u32::<LittleEndian>()?,
n_prv_in: reader.read_u32::<LittleEndian>()?,
n_labels: reader.read_u64::<LittleEndian>()?,
n_constraints: reader.read_u32::<LittleEndian>()?,
})
}
}
fn read_constraint_vec<R: Read, E: PairingEngine>(mut reader: R) -> Result<ConstraintVec<E>> {
let n_vec = reader.read_u32::<LittleEndian>()? as usize;
let mut vec = Vec::with_capacity(n_vec);
for _ in 0..n_vec {
vec.push((
reader.read_u32::<LittleEndian>()? as usize,
E::Fr::read(&mut reader)?,
));
}
Ok(vec)
}
fn read_constraints<R: Read, E: PairingEngine>(
mut reader: R,
header: &Header,
) -> Result<Vec<Constraints<E>>> {
// todo check section size
let mut vec = Vec::with_capacity(header.n_constraints as usize);
for _ in 0..header.n_constraints {
vec.push((
read_constraint_vec::<&mut R, E>(&mut reader)?,
read_constraint_vec::<&mut R, E>(&mut reader)?,
read_constraint_vec::<&mut R, E>(&mut reader)?,
));
}
Ok(vec)
}
fn read_map<R: Read>(mut reader: R, size: u64, header: &Header) -> Result<Vec<u64>> {
if size != header.n_wires as u64 * 8 {
return Err(Error::new(
ErrorKind::InvalidData,
"Invalid map section size",
));
}
let mut vec = Vec::with_capacity(header.n_wires as usize);
for _ in 0..header.n_wires {
vec.push(reader.read_u64::<LittleEndian>()?);
}
if vec[0] != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
"Wire 0 should always be mapped to 0",
));
}
Ok(vec)
}
#[cfg(test)]
mod tests {
use super::*;
use ark_bn254::{Bn254, Fr};
#[test]
fn sample() {
let data = hex_literal::hex!(
"
72316373
01000000
03000000
01000000 40000000 00000000
20000000
010000f0 93f5e143 9170b979 48e83328 5d588181 b64550b8 29a031e1 724e6430
07000000
01000000
02000000
03000000
e8030000 00000000
03000000
02000000 88020000 00000000
02000000
05000000 03000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
06000000 08000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000
00000000 02000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
02000000 14000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000 0C000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
02000000
00000000 05000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
02000000 07000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000
01000000 04000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
04000000 08000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
05000000 03000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
02000000
03000000 2C000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
06000000 06000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
00000000
01000000
06000000 04000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000
00000000 06000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
02000000 0B000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000 05000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
01000000
06000000 58020000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
03000000 38000000 00000000
00000000 00000000
03000000 00000000
0a000000 00000000
0b000000 00000000
0c000000 00000000
0f000000 00000000
44010000 00000000
"
);
let file = R1CSFile::<Bn254>::new(&data[..]).unwrap();
assert_eq!(file.version, 1);
assert_eq!(file.header.field_size, 32);
assert_eq!(
file.header.prime_size,
hex::decode("010000f093f5e1439170b97948e833285d588181b64550b829a031e1724e6430")
.unwrap(),
);
assert_eq!(file.header.n_wires, 7);
assert_eq!(file.header.n_pub_out, 1);
assert_eq!(file.header.n_pub_in, 2);
assert_eq!(file.header.n_prv_in, 3);
assert_eq!(file.header.n_labels, 0x03e8);
assert_eq!(file.header.n_constraints, 3);
assert_eq!(file.constraints.len(), 3);
assert_eq!(file.constraints[0].0.len(), 2);
assert_eq!(file.constraints[0].0[0].0, 5);
assert_eq!(file.constraints[0].0[0].1, Fr::from(3));
assert_eq!(file.constraints[2].1[0].0, 0);
assert_eq!(file.constraints[2].1[0].1, Fr::from(6));
assert_eq!(file.constraints[1].2.len(), 0);
assert_eq!(file.wire_mapping.len(), 7);
assert_eq!(file.wire_mapping[1], 3);
}
}

271
src/ethereum.rs Normal file

@@ -0,0 +1,271 @@
//! Helpers for converting Arkworks types to U256-tuples as expected by the
//! Solidity Groth16 Verifier smart contracts
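//!
//! A minimal sketch of the intended flow (assumes an `ark_groth16::Proof<Bn254>` named
//! `proof` and the public inputs `public_inputs: Vec<Fr>` are in scope; the snarkjs-exported
//! Solidity verifier typically exposes a `verifyProof` entry point):
//!
//! ```ignore
//! let (a, b, c) = Proof::from(proof).as_tuple();
//! let inputs = Inputs::from(&public_inputs[..]).0;
//! // `a`, `b`, `c` and `inputs` can now be ABI-encoded for the verifier contract call.
//! ```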
use ark_ff::{BigInteger, FromBytes, PrimeField};
use ethers::types::U256;
use ark_bn254::{Bn254, Fq2, Fr, G1Affine, G2Affine};
pub struct Inputs(pub Vec<U256>);
impl From<&[Fr]> for Inputs {
fn from(src: &[Fr]) -> Self {
let els = src.iter().map(|point| point_to_u256(*point)).collect();
Self(els)
}
}
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct G1 {
pub x: U256,
pub y: U256,
}
impl From<G1> for G1Affine {
fn from(src: G1) -> G1Affine {
let x = u256_to_point(src.x);
let y = u256_to_point(src.y);
G1Affine::new(x, y, false)
}
}
type G1Tup = (U256, U256);
impl G1 {
pub fn as_tuple(&self) -> (U256, U256) {
(self.x, self.y)
}
}
impl From<&G1Affine> for G1 {
fn from(p: &G1Affine) -> Self {
Self {
x: point_to_u256(p.x),
y: point_to_u256(p.y),
}
}
}
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct G2 {
pub x: [U256; 2],
pub y: [U256; 2],
}
impl From<G2> for G2Affine {
fn from(src: G2) -> G2Affine {
let c0 = u256_to_point(src.x[0]);
let c1 = u256_to_point(src.x[1]);
let x = Fq2::new(c0, c1);
let c0 = u256_to_point(src.y[0]);
let c1 = u256_to_point(src.y[1]);
let y = Fq2::new(c0, c1);
G2Affine::new(x, y, false)
}
}
type G2Tup = ([U256; 2], [U256; 2]);
impl G2 {
// NB: Serialize the c1 limb first.
pub fn as_tuple(&self) -> G2Tup {
([self.x[1], self.x[0]], [self.y[1], self.y[0]])
}
}
impl From<&G2Affine> for G2 {
fn from(p: &G2Affine) -> Self {
Self {
x: [point_to_u256(p.x.c0), point_to_u256(p.x.c1)],
y: [point_to_u256(p.y.c0), point_to_u256(p.y.c1)],
}
}
}
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Proof {
a: G1,
b: G2,
c: G1,
}
impl Proof {
pub fn as_tuple(&self) -> (G1Tup, G2Tup, G1Tup) {
(self.a.as_tuple(), self.b.as_tuple(), self.c.as_tuple())
}
}
impl From<ark_groth16::Proof<Bn254>> for Proof {
fn from(proof: ark_groth16::Proof<Bn254>) -> Self {
Self {
a: G1::from(&proof.a),
b: G2::from(&proof.b),
c: G1::from(&proof.c),
}
}
}
impl From<Proof> for ark_groth16::Proof<Bn254> {
fn from(src: Proof) -> ark_groth16::Proof<Bn254> {
ark_groth16::Proof {
a: src.a.into(),
b: src.b.into(),
c: src.c.into(),
}
}
}
#[derive(Default, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct VerifyingKey {
pub alpha1: G1,
pub beta2: G2,
pub gamma2: G2,
pub delta2: G2,
pub ic: Vec<G1>,
}
impl VerifyingKey {
pub fn as_tuple(&self) -> (G1Tup, G2Tup, G2Tup, G2Tup, Vec<G1Tup>) {
(
self.alpha1.as_tuple(),
self.beta2.as_tuple(),
self.gamma2.as_tuple(),
self.delta2.as_tuple(),
self.ic.iter().map(|i| i.as_tuple()).collect(),
)
}
}
impl From<ark_groth16::VerifyingKey<Bn254>> for VerifyingKey {
fn from(vk: ark_groth16::VerifyingKey<Bn254>) -> Self {
Self {
alpha1: G1::from(&vk.alpha_g1),
beta2: G2::from(&vk.beta_g2),
gamma2: G2::from(&vk.gamma_g2),
delta2: G2::from(&vk.delta_g2),
ic: vk.gamma_abc_g1.iter().map(G1::from).collect(),
}
}
}
impl From<VerifyingKey> for ark_groth16::VerifyingKey<Bn254> {
fn from(src: VerifyingKey) -> ark_groth16::VerifyingKey<Bn254> {
ark_groth16::VerifyingKey {
alpha_g1: src.alpha1.into(),
beta_g2: src.beta2.into(),
gamma_g2: src.gamma2.into(),
delta_g2: src.delta2.into(),
gamma_abc_g1: src.ic.into_iter().map(Into::into).collect(),
}
}
}
// Helper for converting a U256 into its PrimeField representation
fn u256_to_point<F: PrimeField>(point: U256) -> F {
let mut buf = [0; 32];
point.to_little_endian(&mut buf);
let bigint = F::BigInt::read(&buf[..]).expect("always works");
F::from_repr(bigint).expect("always works")
}
// Helper for converting a PrimeField to its U256 representation for Ethereum compatibility
// (U256 reads data as big endian)
fn point_to_u256<F: PrimeField>(point: F) -> U256 {
let point = point.into_repr();
let point_bytes = point.to_bytes_be();
U256::from(&point_bytes[..])
}
#[cfg(test)]
mod tests {
use super::*;
use ark_bn254::Fq;
fn fq() -> Fq {
Fq::from(2)
}
fn fq2() -> Fq2 {
Fq2::from(2)
}
fn fr() -> Fr {
Fr::from(2)
}
fn g1() -> G1Affine {
G1Affine::new(fq(), fq(), false)
}
fn g2() -> G2Affine {
G2Affine::new(fq2(), fq2(), false)
}
#[test]
fn convert_fq() {
let el = fq();
let el2 = point_to_u256(el);
let el3: Fq = u256_to_point(el2);
let el4 = point_to_u256(el3);
assert_eq!(el, el3);
assert_eq!(el2, el4);
}
#[test]
fn convert_fr() {
let el = fr();
let el2 = point_to_u256(el);
let el3: Fr = u256_to_point(el2);
let el4 = point_to_u256(el3);
assert_eq!(el, el3);
assert_eq!(el2, el4);
}
#[test]
fn convert_g1() {
let el = g1();
let el2 = G1::from(&el);
let el3: G1Affine = el2.into();
let el4 = G1::from(&el3);
assert_eq!(el, el3);
assert_eq!(el2, el4);
}
#[test]
fn convert_g2() {
let el = g2();
let el2 = G2::from(&el);
let el3: G2Affine = el2.into();
let el4 = G2::from(&el3);
assert_eq!(el, el3);
assert_eq!(el2, el4);
}
#[test]
fn convert_vk() {
let vk = ark_groth16::VerifyingKey::<Bn254> {
alpha_g1: g1(),
beta_g2: g2(),
gamma_g2: g2(),
delta_g2: g2(),
gamma_abc_g1: vec![g1(), g1(), g1()],
};
let vk_ethers = VerifyingKey::from(vk.clone());
let ark_vk: ark_groth16::VerifyingKey<Bn254> = vk_ethers.into();
assert_eq!(ark_vk, vk);
}
#[test]
fn convert_proof() {
let p = ark_groth16::Proof::<Bn254> {
a: g1(),
b: g2(),
c: g1(),
};
let p2 = Proof::from(p.clone());
let p3 = ark_groth16::Proof::from(p2);
assert_eq!(p, p3);
}
}

79
src/lib.rs Normal file

@@ -0,0 +1,79 @@
//! Arkworks - Circom Compatibility layer
//!
//! Given a Circom WASM-compiled witness generator (witness.wasm), it can read it and
//! calculate the corresponding witness for the specified inputs.
//!
//! ## WASM Witness Generator
//!
//! ## Types
//! * ZKey
//! * WTNS
//! * R1CS
//! * WASM
//! * Sys?
//!
//! Inputs:
//! * circuit.wasm
//! * input.json
//!
//! Outputs:
//! * witness.wtns
//!
//! Given a circuit WASM and an input.json calculates the corresponding witness
//!
//! ## Proof calculator
//!
//! Inputs:
//! * witness.wtns / witness.json
//! * circuit.zkey
//!
//! Given a witness (and r1cs?), synthesizes the Circom circuit
//! and then feeds it to the Arkworks Groth16 prover
//!
//! Outputs:
//! * public.json
//! * proof.json
//!
//! ## Smart Contract connector class
//!
//! Given an Arkworks proof, it can translate it into the arguments expected
//! by the Circom-generated Solidity verifier
//!
//! (No Dark Forest specific modifications included, these are part of df-snark)
//!
//! ## Binary
//!
//! CLIs for each of the above + logging to stdout
//!
//! ## Commands
//!
//! Compile a circuit:
//! `circom circuit.circom --r1cs --wasm --sym`
//!
//! Phase2 over circuit + PoT
//! `snarkjs zkey new circuit.r1cs powersOfTau28_hez_final_10.ptau circuit_0000.zkey`
//! `snarkjs zkey contribute circuit_0000.zkey circuit_final.zkey`
//! `snarkjs zkey export verificationkey circuit_final.zkey verification_key.json`
//!
//! Witness calculation from inputs:
//! `snarkjs wtns calculate circuit.wasm input.json witness.wtns`
//! `snarkjs wtns export json witness.wtns witness.json`
//!
//! Groth16 proof calculation:
//! `snarkjs groth16 prove circuit_final.zkey witness.wtns proof.json public.json`
//!
//! Groth16 Proof verification:
//! `snarkjs groth16 verify verification_key.json public.json proof.json`
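//!
//! ## Library usage
//!
//! A minimal end-to-end sketch of the Rust API, mirroring the tests in `src/zkey.rs`
//! (the crate path, file paths, proving key `params` and `rng` are placeholders /
//! assumed to be in scope):
//!
//! ```ignore
//! use ark_bn254::Bn254;
//! use ark_circom::{circom_qap::R1CStoQAPCircom, CircomBuilder, CircuitConfig};
//! use ark_groth16::{create_random_proof_with_qap as prove, prepare_verifying_key, verify_proof};
//!
//! // Build the circuit and compute its witness from the inputs
//! let cfg = CircuitConfig::<Bn254>::new("circuit.wasm", "circuit.r1cs")?;
//! let mut builder = CircomBuilder::new(cfg);
//! builder.push_input("a", 3);
//! let circom = builder.build()?;
//! let inputs = circom.get_public_inputs().unwrap();
//!
//! // Prove with a proving key read from a snarkjs zkey (see the `zkey` module)
//! let proof = prove::<_, _, _, R1CStoQAPCircom>(circom, &params, &mut rng)?;
//!
//! // Verify
//! let pvk = prepare_verifying_key(&params.vk);
//! assert!(verify_proof(&pvk, &proof, &inputs)?);
//! ```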
mod circom_wasm;
pub use circom_wasm::WitnessCalculator;
pub mod circuit;
pub use circuit::{CircomBuilder, CircomCircuit, CircuitConfig};
pub mod ethereum;
pub mod zkey;
pub mod circom_qap;

835
src/zkey.rs Normal file

@@ -0,0 +1,835 @@
//! ZKey
//!
//! Each ZKey file is broken into sections:
//! Header(1)
//! Prover Type 1 Groth
//! HeaderGroth(2)
//! n8q
//! q
//! n8r
//! r
//! NVars
//! NPub
//! DomainSize (power of 2)
//! alpha1
//! beta1
//! delta1
//! beta2
//! gamma2
//! delta2
//! IC(3)
//! Coefs(4)
//! PointsA(5)
//! PointsB1(6)
//! PointsB2(7)
//! PointsC(8)
//! PointsH(9)
//! Contributions(10)
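//!
//! A minimal sketch for reading the proving key out of a `.zkey` file, mirroring the
//! tests below (the path is a placeholder):
//!
//! ```ignore
//! let file = File::open("test.zkey")?;
//! let map = unsafe { MmapOptions::new().map(&file)? };
//! let mut reader = Cursor::new(map.as_ref());
//! let params = BinFile::new(&mut reader)?.proving_key()?;
//! ```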
use ark_ff::{BigInteger256, FromBytes};
use ark_serialize::{CanonicalDeserialize, SerializationError};
use ark_std::log2;
use byteorder::{LittleEndian, ReadBytesExt};
use std::{
collections::HashMap,
io::{Cursor, Read, Result as IoResult},
};
use ark_bn254::{Bn254, Fq, Fq2, G1Affine, G2Affine};
use ark_groth16::{ProvingKey, VerifyingKey};
use ark_serialize::CanonicalSerialize;
use num_traits::Zero;
#[derive(Clone, Debug)]
pub struct Section {
position: u64,
size: usize,
}
#[derive(Debug)]
pub struct BinFile<'a> {
ftype: String,
version: u32,
sections: HashMap<u32, Vec<Section>>,
reader: &'a mut Cursor<&'a [u8]>,
}
impl<'a> BinFile<'a> {
pub fn new(reader: &'a mut Cursor<&'a [u8]>) -> IoResult<Self> {
let mut magic = [0u8; 4];
reader.read_exact(&mut magic)?;
let version = reader.read_u32::<LittleEndian>()?;
let num_sections = reader.read_u32::<LittleEndian>()?;
let mut sections = HashMap::new();
for _ in 0..num_sections {
let section_id = reader.read_u32::<LittleEndian>()?;
let section_length = reader.read_u64::<LittleEndian>()?;
let section = sections.entry(section_id).or_insert_with(Vec::new);
section.push(Section {
position: reader.position(),
size: section_length as usize,
});
reader.set_position(reader.position() + section_length);
}
Ok(Self {
ftype: std::str::from_utf8(&magic[..]).unwrap().to_string(),
version,
sections,
reader,
})
}
pub fn proving_key(&mut self) -> IoResult<ProvingKey<Bn254>> {
let header = self.groth_header()?;
let ic = self.ic(header.n_public)?;
let a_query = self.a_query(header.n_vars)?;
let b_g1_query = self.b_g1_query(header.n_vars)?;
let b_g2_query = self.b_g2_query(header.n_vars)?;
let l_query = self.l_query(header.n_vars - header.n_public - 1)?;
let h_query = self.h_query(header.domain_size as usize)?;
let vk = VerifyingKey::<Bn254> {
alpha_g1: header.verifying_key.alpha_g1,
beta_g2: header.verifying_key.beta_g2,
gamma_g2: header.verifying_key.gamma_g2,
delta_g2: header.verifying_key.delta_g2,
gamma_abc_g1: ic,
};
let pk = ProvingKey::<Bn254> {
vk,
beta_g1: header.verifying_key.beta_g1,
delta_g1: header.verifying_key.delta_g1,
a_query,
b_g1_query,
b_g2_query,
h_query,
l_query,
};
Ok(pk)
}
fn get_section(&self, id: u32) -> Section {
self.sections.get(&id).unwrap()[0].clone()
}
pub fn groth_header(&mut self) -> IoResult<HeaderGroth> {
let section = self.get_section(2);
let header = HeaderGroth::new(&mut self.reader, &section)?;
Ok(header)
}
pub fn ic(&mut self, n_public: usize) -> IoResult<Vec<G1Affine>> {
// the range is non-inclusive so we do +1 to get all inputs
self.g1_section(n_public + 1, 3)
}
// Section 4 is the coefficients, we ignore it
pub fn a_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 5)
}
pub fn b_g1_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 6)
}
pub fn b_g2_query(&mut self, n_vars: usize) -> IoResult<Vec<G2Affine>> {
self.g2_section(n_vars, 7)
}
pub fn l_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 8)
}
pub fn h_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 9)
}
fn g1_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G1Affine>> {
let section = self.get_section(section_id as u32);
deserialize_g1_vec(
&self.reader.get_ref()[section.position as usize..],
num as u32,
)
}
fn g2_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G2Affine>> {
let section = self.get_section(section_id as u32);
deserialize_g2_vec(
&self.reader.get_ref()[section.position as usize..],
num as u32,
)
}
}
#[derive(Default, Clone, Debug, CanonicalDeserialize)]
pub struct ZVerifyingKey {
pub alpha_g1: G1Affine,
pub beta_g1: G1Affine,
pub beta_g2: G2Affine,
pub gamma_g2: G2Affine,
pub delta_g1: G1Affine,
pub delta_g2: G2Affine,
}
impl ZVerifyingKey {
fn new<R: Read>(reader: &mut R) -> IoResult<Self> {
let alpha_g1 = deserialize_g1(reader)?;
let beta_g1 = deserialize_g1(reader)?;
let beta_g2 = deserialize_g2(reader)?;
let gamma_g2 = deserialize_g2(reader)?;
let delta_g1 = deserialize_g1(reader)?;
let delta_g2 = deserialize_g2(reader)?;
Ok(Self {
alpha_g1,
beta_g1,
beta_g2,
gamma_g2,
delta_g1,
delta_g2,
})
}
}
#[derive(Clone, Debug)]
pub struct HeaderGroth {
pub n8q: u32,
pub q: BigInteger256,
pub n8r: u32,
pub r: BigInteger256,
pub n_vars: usize,
pub n_public: usize,
pub domain_size: u32,
pub power: u32,
pub verifying_key: ZVerifyingKey,
}
impl HeaderGroth {
pub fn new(reader: &mut Cursor<&[u8]>, section: &Section) -> IoResult<Self> {
reader.set_position(section.position);
Self::read(reader)
}
fn read<R: Read>(mut reader: &mut R) -> IoResult<Self> {
// TODO: Impl From<u32> in Arkworks
let n8q: u32 = FromBytes::read(&mut reader)?;
// q: the base field modulus of Bn254
let q = BigInteger256::read(&mut reader)?;
let n8r: u32 = FromBytes::read(&mut reader)?;
// r: the scalar field modulus (group order) of Bn254
let r = BigInteger256::read(&mut reader)?;
let n_vars = u32::read(&mut reader)? as usize;
let n_public = u32::read(&mut reader)? as usize;
let domain_size: u32 = FromBytes::read(&mut reader)?;
let power = log2(domain_size as usize);
let verifying_key = ZVerifyingKey::new(&mut reader)?;
Ok(Self {
n8q,
q,
n8r,
r,
n_vars,
n_public,
domain_size,
power,
verifying_key,
})
}
}
// skips the multiplication by R because Circom points are already in Montgomery form
fn deserialize_field<R: Read>(reader: &mut R) -> IoResult<Fq> {
let bigint = BigInteger256::read(reader)?;
// if you use ark_ff::PrimeField::from_repr it multiplies by R
Ok(Fq::new(bigint))
}
fn deserialize_g1<R: Read>(reader: &mut R) -> IoResult<G1Affine> {
let x = deserialize_field(reader)?;
let y = deserialize_field(reader)?;
let infinity = x.is_zero() && y.is_zero();
Ok(G1Affine::new(x, y, infinity))
}
fn deserialize_g2<R: Read>(reader: &mut R) -> IoResult<G2Affine> {
let c0 = deserialize_field(reader)?;
let c1 = deserialize_field(reader)?;
let f1 = Fq2::new(c0, c1);
let c0 = deserialize_field(reader)?;
let c1 = deserialize_field(reader)?;
let f2 = Fq2::new(c0, c1);
let infinity = f1.is_zero() && f2.is_zero();
Ok(G2Affine::new(f1, f2, infinity))
}
fn deserialize_g1_vec(buf: &[u8], n_vars: u32) -> IoResult<Vec<G1Affine>> {
let size = G1Affine::zero().uncompressed_size();
let mut v = vec![];
for i in 0..n_vars as usize {
let el = deserialize_g1(&mut &buf[i * size..(i + 1) * size])?;
v.push(el);
}
Ok(v)
}
fn deserialize_g2_vec(buf: &[u8], n_vars: u32) -> IoResult<Vec<G2Affine>> {
let size = G2Affine::zero().uncompressed_size();
let mut v = vec![];
for i in 0..n_vars as usize {
let el = deserialize_g2(&mut &buf[i * size..(i + 1) * size])?;
v.push(el);
}
Ok(v)
}
#[cfg(test)]
mod tests {
use super::*;
use ark_bn254::{G1Projective, G2Projective};
use memmap::*;
use num_bigint::BigUint;
use serde_json::Value;
use std::fs::File;
use crate::{circom_qap::R1CStoQAPCircom, CircomBuilder, CircuitConfig};
use ark_groth16::{create_random_proof_with_qap as prove, prepare_verifying_key, verify_proof};
use ark_std::rand::thread_rng;
use num_traits::{One, Zero};
use std::str::FromStr;
use std::convert::TryFrom;
fn fq_from_str(s: &str) -> Fq {
BigInteger256::try_from(BigUint::from_str(s).unwrap())
.unwrap()
.into()
}
// Circom snarkjs code:
// console.log(curve.G1.F.one)
fn fq_buf() -> Vec<u8> {
vec![
157, 13, 143, 197, 141, 67, 93, 211, 61, 11, 199, 245, 40, 235, 120, 10, 44, 70, 121,
120, 111, 163, 110, 102, 47, 223, 7, 154, 193, 119, 10, 14,
]
}
// Circom snarkjs code:
// const buff = new Uint8Array(curve.G1.F.n8*2);
// curve.G1.toRprLEM(buff, 0, curve.G1.one);
// console.dir( buff, { 'maxArrayLength': null })
fn g1_buf() -> Vec<u8> {
vec![
157, 13, 143, 197, 141, 67, 93, 211, 61, 11, 199, 245, 40, 235, 120, 10, 44, 70, 121,
120, 111, 163, 110, 102, 47, 223, 7, 154, 193, 119, 10, 14, 58, 27, 30, 139, 27, 135,
186, 166, 123, 22, 142, 235, 81, 214, 241, 20, 88, 140, 242, 240, 222, 70, 221, 204,
94, 190, 15, 52, 131, 239, 20, 28,
]
}
// Circom snarkjs code:
// const buff = new Uint8Array(curve.G2.F.n8*2);
// curve.G2.toRprLEM(buff, 0, curve.G2.one);
// console.dir( buff, { 'maxArrayLength': null })
fn g2_buf() -> Vec<u8> {
vec![
38, 32, 188, 2, 209, 181, 131, 142, 114, 1, 123, 73, 53, 25, 235, 220, 223, 26, 129,
151, 71, 38, 184, 251, 59, 80, 150, 175, 65, 56, 87, 25, 64, 97, 76, 168, 125, 115,
180, 175, 196, 216, 2, 88, 90, 221, 67, 96, 134, 47, 160, 82, 252, 80, 233, 9, 107,
123, 234, 58, 131, 240, 254, 20, 246, 233, 107, 136, 157, 250, 157, 97, 120, 155, 158,
245, 151, 210, 127, 254, 254, 125, 27, 35, 98, 26, 158, 255, 6, 66, 158, 174, 235, 126,
253, 40, 238, 86, 24, 199, 86, 91, 9, 100, 187, 60, 125, 50, 34, 249, 87, 220, 118, 16,
53, 51, 190, 53, 249, 85, 130, 100, 253, 147, 230, 160, 164, 13,
]
}
// Circom logs in Projective coordinates: console.log(curve.G1.one)
fn g1_one() -> G1Affine {
let x = Fq::one();
let y = Fq::one() + Fq::one();
let z = Fq::one();
G1Affine::from(G1Projective::new(x, y, z))
}
// Circom logs in Projective coordinates: console.log(curve.G2.one)
fn g2_one() -> G2Affine {
let x = Fq2::new(
fq_from_str(
"10857046999023057135944570762232829481370756359578518086990519993285655852781",
),
fq_from_str(
"11559732032986387107991004021392285783925812861821192530917403151452391805634",
),
);
let y = Fq2::new(
fq_from_str(
"8495653923123431417604973247489272438418190587263600148770280649306958101930",
),
fq_from_str(
"4082367875863433681332203403145435568316851327593401208105741076214120093531",
),
);
let z = Fq2::new(Fq::one(), Fq::zero());
G2Affine::from(G2Projective::new(x, y, z))
}
#[test]
fn can_deser_fq() {
let buf = fq_buf();
let fq = deserialize_field(&mut &buf[..]).unwrap();
assert_eq!(fq, Fq::one());
}
#[test]
fn can_deser_g1() {
let buf = g1_buf();
assert_eq!(buf.len(), 64);
let g1 = deserialize_g1(&mut &buf[..]).unwrap();
let expected = g1_one();
assert_eq!(g1, expected);
}
#[test]
fn can_deser_g1_vec() {
let n_vars = 10;
let buf = vec![g1_buf(); n_vars]
.iter()
.cloned()
.flatten()
.collect::<Vec<_>>();
let expected = vec![g1_one(); n_vars];
let de = deserialize_g1_vec(&buf[..], n_vars as u32).unwrap();
assert_eq!(expected, de);
}
#[test]
fn can_deser_g2() {
let buf = g2_buf();
assert_eq!(buf.len(), 128);
let g2 = deserialize_g2(&mut &buf[..]).unwrap();
let expected = g2_one();
assert_eq!(g2, expected);
}
#[test]
fn can_deser_g2_vec() {
let n_vars = 10;
let buf = vec![g2_buf(); n_vars]
.iter()
.cloned()
.flatten()
.collect::<Vec<_>>();
let expected = vec![g2_one(); n_vars];
let de = deserialize_g2_vec(&buf[..], n_vars as u32).unwrap();
assert_eq!(expected, de);
}
#[test]
fn header() {
// `circom --r1cs` using the below file:
//
// template Multiplier() {
// signal private input a;
// signal private input b;
// signal output c;
//
// c <== a*b;
// }
//
// component main = Multiplier();
//
// Then:
// `snarkjs zkey new circuit.r1cs powersOfTau28_hez_final_10.ptau test.zkey`
let path = "./test-vectors/test.zkey";
let file = File::open(path).unwrap();
let map = unsafe {
MmapOptions::new()
.map(&file)
.expect("unable to create a memory map")
};
let mut reader = Cursor::new(map.as_ref());
let mut binfile = BinFile::new(&mut reader).unwrap();
let header = binfile.groth_header().unwrap();
assert_eq!(header.n_vars, 4);
assert_eq!(header.n_public, 1);
assert_eq!(header.domain_size, 4);
assert_eq!(header.power, 2);
}
#[test]
fn deser_key() {
let path = "./test-vectors/test.zkey";
let file = File::open(path).unwrap();
let map = unsafe {
MmapOptions::new()
.map(&file)
.expect("unable to create a memory map")
};
let mut reader = Cursor::new(map.as_ref());
let mut binfile = BinFile::new(&mut reader).unwrap();
let params = binfile.proving_key().unwrap();
// Check IC
let expected = vec![
deserialize_g1(
&mut &[
11, 205, 205, 176, 2, 105, 129, 243, 153, 58, 137, 89, 61, 95, 99, 161, 133,
201, 153, 192, 119, 19, 113, 136, 43, 105, 47, 206, 166, 55, 81, 22, 154, 77,
58, 119, 28, 230, 160, 206, 134, 98, 4, 115, 112, 184, 46, 117, 61, 180, 103,
138, 141, 202, 110, 252, 199, 252, 141, 211, 5, 46, 244, 10,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
118, 135, 198, 156, 63, 190, 210, 98, 194, 59, 169, 168, 204, 168, 76, 208,
109, 170, 24, 193, 57, 31, 184, 88, 234, 218, 118, 58, 107, 129, 90, 36, 230,
98, 62, 243, 3, 55, 68, 227, 117, 64, 188, 81, 81, 247, 161, 68, 68, 210, 142,
191, 174, 43, 110, 194, 253, 128, 217, 4, 54, 196, 111, 43,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.vk.gamma_abc_g1);
// Check A Query
let expected = vec![
deserialize_g1(
&mut &[
240, 165, 110, 187, 72, 39, 218, 59, 128, 85, 50, 174, 229, 1, 86, 58, 125,
244, 145, 205, 248, 253, 120, 2, 165, 140, 154, 55, 220, 253, 14, 19, 212, 106,
59, 19, 125, 198, 202, 4, 59, 74, 14, 62, 20, 248, 219, 47, 234, 205, 54, 183,
33, 119, 165, 84, 46, 75, 39, 17, 229, 42, 192, 2,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
93, 53, 177, 82, 50, 5, 123, 116, 91, 35, 14, 196, 43, 180, 54, 15, 88, 144,
197, 105, 57, 167, 54, 5, 188, 109, 17, 89, 9, 223, 80, 1, 39, 193, 211, 168,
203, 119, 169, 105, 17, 156, 53, 106, 11, 102, 44, 92, 123, 220, 158, 240, 97,
253, 30, 121, 4, 236, 171, 23, 100, 34, 133, 11,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
177, 47, 21, 237, 244, 73, 76, 98, 80, 10, 10, 142, 80, 145, 40, 254, 100, 214,
103, 33, 38, 84, 238, 248, 252, 181, 75, 32, 109, 16, 93, 23, 135, 157, 206,
122, 107, 105, 202, 164, 197, 124, 242, 100, 70, 108, 9, 180, 224, 102, 250,
149, 130, 14, 133, 185, 132, 189, 193, 230, 180, 143, 156, 30,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.a_query);
// B G1 Query
let expected = vec![
deserialize_g1(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
177, 47, 21, 237, 244, 73, 76, 98, 80, 10, 10, 142, 80, 145, 40, 254, 100, 214,
103, 33, 38, 84, 238, 248, 252, 181, 75, 32, 109, 16, 93, 23, 192, 95, 174, 93,
171, 34, 86, 151, 199, 77, 127, 3, 75, 254, 119, 227, 124, 241, 134, 235, 51,
55, 203, 254, 164, 226, 111, 250, 189, 190, 199, 17,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.b_g1_query);
// B G2 Query
let expected = vec![
deserialize_g2(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g2(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g2(
&mut &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
][..],
)
.unwrap(),
deserialize_g2(
&mut &[
240, 25, 157, 232, 164, 49, 152, 204, 244, 190, 178, 178, 29, 133, 205, 175,
172, 28, 12, 123, 139, 202, 196, 13, 67, 165, 204, 42, 74, 40, 6, 36, 112, 104,
61, 67, 107, 112, 72, 41, 213, 210, 249, 75, 89, 144, 144, 34, 177, 228, 18,
70, 80, 195, 124, 82, 40, 122, 91, 21, 198, 100, 154, 1, 16, 235, 41, 4, 176,
106, 9, 113, 141, 251, 100, 233, 188, 128, 194, 173, 0, 100, 206, 110, 53, 223,
163, 47, 166, 235, 25, 12, 151, 238, 45, 0, 78, 210, 56, 53, 57, 212, 67, 189,
253, 132, 62, 62, 116, 20, 235, 15, 245, 113, 30, 182, 33, 127, 203, 231, 124,
149, 74, 223, 39, 190, 217, 41,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.b_g2_query);
// Check L Query
let expected = vec![
deserialize_g1(
&mut &[
146, 142, 29, 235, 9, 162, 84, 255, 6, 119, 86, 214, 154, 18, 12, 190, 202, 19,
168, 45, 29, 76, 174, 130, 6, 59, 146, 15, 229, 82, 81, 40, 50, 25, 124, 247,
129, 12, 147, 35, 108, 119, 178, 116, 238, 145, 33, 184, 74, 201, 128, 41, 151,
6, 60, 84, 156, 225, 200, 14, 240, 171, 128, 20,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
26, 32, 112, 226, 161, 84, 188, 236, 141, 226, 119, 169, 235, 218, 253, 176,
157, 184, 108, 243, 73, 122, 239, 217, 39, 190, 239, 105, 147, 190, 80, 47,
211, 68, 155, 212, 139, 173, 229, 160, 123, 117, 243, 110, 162, 188, 217, 206,
102, 19, 36, 189, 87, 183, 113, 8, 164, 133, 43, 142, 138, 109, 66, 33,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.l_query);
// Check H Query
let expected = vec![
deserialize_g1(
&mut &[
21, 76, 104, 34, 28, 236, 135, 204, 218, 16, 160, 115, 185, 44, 19, 62, 43, 24,
57, 99, 207, 105, 10, 139, 195, 60, 17, 57, 85, 244, 167, 10, 166, 166, 165,
55, 38, 75, 116, 116, 182, 87, 217, 112, 28, 237, 239, 123, 231, 180, 122, 109,
77, 116, 88, 67, 102, 48, 80, 214, 137, 47, 94, 30,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
144, 175, 205, 119, 119, 192, 11, 10, 148, 224, 87, 161, 157, 231, 101, 208,
55, 15, 13, 16, 24, 59, 9, 22, 63, 215, 255, 30, 77, 188, 71, 37, 84, 227, 59,
29, 159, 116, 101, 93, 212, 220, 159, 141, 204, 107, 131, 87, 174, 149, 175,
72, 199, 109, 64, 109, 180, 150, 160, 249, 246, 33, 212, 29,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
129, 169, 52, 179, 66, 88, 123, 199, 222, 69, 24, 17, 219, 235, 118, 195, 156,
210, 14, 21, 76, 155, 178, 210, 223, 4, 233, 5, 8, 18, 156, 24, 82, 68, 183,
186, 7, 126, 2, 201, 207, 207, 74, 45, 44, 199, 16, 165, 25, 65, 157, 199, 90,
159, 12, 150, 250, 17, 177, 193, 244, 93, 230, 41,
][..],
)
.unwrap(),
deserialize_g1(
&mut &[
207, 61, 229, 214, 21, 61, 103, 165, 93, 145, 54, 138, 143, 214, 5, 83, 183,
22, 174, 87, 108, 59, 99, 96, 19, 20, 25, 139, 114, 238, 198, 40, 182, 88, 1,
255, 206, 132, 156, 165, 178, 171, 0, 226, 179, 30, 192, 4, 79, 198, 69, 43,
145, 133, 116, 86, 36, 144, 190, 119, 79, 241, 76, 16,
][..],
)
.unwrap(),
];
assert_eq!(expected, params.h_query);
}
#[test]
fn deser_vk() {
let path = "./test-vectors/test.zkey";
let file = File::open(path).unwrap();
let map = unsafe {
MmapOptions::new()
.map(&file)
.expect("unable to create a memory map")
};
let mut reader = Cursor::new(map.as_ref());
let mut binfile = BinFile::new(&mut reader).unwrap();
let params = binfile.proving_key().unwrap();
let json = std::fs::read_to_string("./test-vectors/verification_key.json").unwrap();
let json: Value = serde_json::from_str(&json).unwrap();
assert_eq!(json_to_g1(&json, "vk_alpha_1"), params.vk.alpha_g1);
assert_eq!(json_to_g2(&json, "vk_beta_2"), params.vk.beta_g2);
assert_eq!(json_to_g2(&json, "vk_gamma_2"), params.vk.gamma_g2);
assert_eq!(json_to_g2(&json, "vk_delta_2"), params.vk.delta_g2);
assert_eq!(json_to_g1_vec(&json, "IC"), params.vk.gamma_abc_g1);
}
fn json_to_g1(json: &Value, key: &str) -> G1Affine {
let els: Vec<String> = json
.get(key)
.unwrap()
.as_array()
.unwrap()
.iter()
.map(|i| i.as_str().unwrap().to_string())
.collect();
G1Affine::from(G1Projective::new(
fq_from_str(&els[0]),
fq_from_str(&els[1]),
fq_from_str(&els[2]),
))
}
fn json_to_g1_vec(json: &Value, key: &str) -> Vec<G1Affine> {
let els: Vec<Vec<String>> = json
.get(key)
.unwrap()
.as_array()
.unwrap()
.iter()
.map(|i| {
i.as_array()
.unwrap()
.iter()
.map(|x| x.as_str().unwrap().to_string())
.collect::<Vec<String>>()
})
.collect();
els.iter()
.map(|coords| {
G1Affine::from(G1Projective::new(
fq_from_str(&coords[0]),
fq_from_str(&coords[1]),
fq_from_str(&coords[2]),
))
})
.collect()
}
fn json_to_g2(json: &Value, key: &str) -> G2Affine {
let els: Vec<Vec<String>> = json
.get(key)
.unwrap()
.as_array()
.unwrap()
.iter()
.map(|i| {
i.as_array()
.unwrap()
.iter()
.map(|x| x.as_str().unwrap().to_string())
.collect::<Vec<String>>()
})
.collect();
let x = Fq2::new(fq_from_str(&els[0][0]), fq_from_str(&els[0][1]));
let y = Fq2::new(fq_from_str(&els[1][0]), fq_from_str(&els[1][1]));
let z = Fq2::new(fq_from_str(&els[2][0]), fq_from_str(&els[2][1]));
G2Affine::from(G2Projective::new(x, y, z))
}
#[test]
fn verify_proof_with_zkey() {
let path = "./test-vectors/test.zkey";
let file = File::open(path).unwrap();
let map = unsafe {
MmapOptions::new()
.map(&file)
.expect("unable to create a memory map")
};
let mut reader = Cursor::new(map.as_ref());
let mut binfile = BinFile::new(&mut reader).unwrap();
let params = binfile.proving_key().unwrap();
let cfg = CircuitConfig::<Bn254>::new(
"./test-vectors/mycircuit.wasm",
"./test-vectors/mycircuit.r1cs",
)
.unwrap();
let mut builder = CircomBuilder::new(cfg);
builder.push_input("a", 3);
builder.push_input("b", 11);
let circom = builder.build().unwrap();
let inputs = circom.get_public_inputs().unwrap();
let mut rng = thread_rng();
let proof = prove::<_, _, _, R1CStoQAPCircom>(circom, &params, &mut rng).unwrap();
let pvk = prepare_verifying_key(&params.vk);
let verified = verify_proof(&pvk, &proof, &inputs).unwrap();
assert!(verified);
}
}