
Merge 0620ef5448 into c6d42a8356

pull/1/merge
Silur, 1 year ago, committed by GitHub
parent commit 651a05e9e0
2 changed files with 44 additions and 49 deletions
  1. Cargo.toml (+1 -1)
  2. src/protogalaxy.rs (+43 -48)

Cargo.toml (+1 -1)

@@ -13,6 +13,6 @@ ark-poly = "0.4.0"
ark-serialize = { version = "0.4.0", default-features = false, features = [ "derive" ] }
rand = { version = "0.8", features = [ "std", "std_rng" ] }
ark-crypto-primitives = { version = "^0.4.0", default-features = false, features = [ "r1cs", "snark", "sponge", "crh" ] }
getrandom = { version = "0.2.10", features = ["js"] }
[dev-dependencies]
ark-bls12-381 = "0.4.0"

src/protogalaxy.rs (+43 -48)

@@ -4,7 +4,6 @@ use ark_ff::fields::PrimeField;
use ark_std::log2;
use ark_std::{cfg_into_iter, Zero};
use std::marker::PhantomData;
use std::ops::Add;
use ark_poly::{
    univariate::{DensePolynomial, SparsePolynomial},
@@ -65,12 +64,9 @@ where
let f_w = eval_f(r1cs, &w.w);
// F(X)
let mut F_X: SparsePolynomial<C::ScalarField> = SparsePolynomial::zero();
for (i, f_w_i) in f_w.iter().enumerate() {
let lhs = pow_i_over_x::<C::ScalarField>(i, &instance.betas, &deltas);
let curr = &lhs * *f_w_i;
F_X = F_X.add(curr);
}
let F_X: SparsePolynomial<C::ScalarField> =
calc_f_from_btree(&f_w, &instance.betas, &deltas);
let F_X_dense = DensePolynomial::from(F_X.clone());
transcript.add_vec(&F_X_dense.coeffs);
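Both the removed loop and the new calc_f_from_btree call build the same polynomial. In notation assumed for this note (β = instance.betas, δ = deltas, f_i(w) the i-th entry of eval_f(r1cs, &w.w), and i_j the j-th bit of i):

    F(X) = \sum_{i=0}^{n-1} \mathrm{pow}_i(\beta + X\,\delta)\, f_i(w), \qquad \mathrm{pow}_i(b) = \prod_{j\,:\,i_j = 1} b_j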
@@ -287,24 +283,6 @@ fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
r
}
fn pow_i_over_x<F: PrimeField>(i: usize, betas: &Vec<F>, deltas: &Vec<F>) -> SparsePolynomial<F> {
assert_eq!(betas.len(), deltas.len());
let n = 2_u64.pow(betas.len() as u32);
let b = bit_decompose(i as u64, n as usize);
let mut r: SparsePolynomial<F> =
SparsePolynomial::<F>::from_coefficients_vec(vec![(0, F::one())]); // start with r(x) = 1
for (j, beta_j) in betas.iter().enumerate() {
if b[j] {
let curr: SparsePolynomial<F> =
SparsePolynomial::<F>::from_coefficients_vec(vec![(0, *beta_j), (1, deltas[j])]);
r = r.mul(&curr);
}
}
r
}
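For reference, the removed pow_i_over_x computed, for an index i with bit decomposition b, the polynomial

    \mathrm{pow\_i\_over\_x}(i, \beta, \delta)(X) = \prod_{j\,:\,b_j = 1} (\beta_j + X\,\delta_j) = \mathrm{pow}_i(\beta + X\,\delta),

which is the identity that the removed test_pow_i_over_x further down checked at a random evaluation point.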
// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
@@ -317,6 +295,46 @@ fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
lagrange_polynomials
}
fn calc_f_from_btree<F: PrimeField>(fw: &[F], betas: &[F], deltas: &[F]) -> SparsePolynomial<F> {
assert_eq!(fw.len() & (fw.len() - 1), 0);
assert_eq!(betas.len(), deltas.len());
let mut layers: Vec<Vec<SparsePolynomial<F>>> = Vec::new();
let leaves: Vec<SparsePolynomial<F>> = fw
.iter()
.enumerate()
.map(|e| SparsePolynomial::<F>::from_coefficients_slice(&[(0, *e.1)]))
.collect();
layers.push(leaves.to_vec());
let mut currentNodes = leaves.clone();
while currentNodes.len() > 1 {
let index = layers.len();
let limit: usize = (2 * currentNodes.len())
- 2usize.pow(
(currentNodes.len() & (currentNodes.len() - 1))
.try_into()
.unwrap(),
);
layers.push(vec![]);
for (i, ni) in currentNodes.iter().enumerate().step_by(2) {
if i >= limit {
layers[index] = currentNodes[0..limit].to_vec();
break;
}
let left = ni.clone();
let right = SparsePolynomial::<F>::from_coefficients_vec(vec![
(0, betas[layers.len() - 2]), // FIXME
(1, deltas[layers.len() - 2]), // FIXME
])
.mul(&currentNodes[i + 1]);
layers[index].push(left + right);
}
currentNodes = layers[index].clone();
}
let root_index = layers.len() - 1;
layers[root_index][0].clone()
}
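A possible consistency check for the new function, sketched here on the assumption that the existing test helpers pow_i, powers_of_beta, vec_add and vec_scalar_mul are in scope (the test is hypothetical, not taken from this diff): evaluate the tree-built F(X) at a random point and compare it against the naive sum that the removed loop computed.

#[test]
fn test_calc_f_from_btree() {
    // Hypothetical check: the binary-tree F(X) must agree with the naive sum
    // sum_i pow_i(betas + x*deltas) * f_w[i] at a random evaluation point x.
    let mut rng = ark_std::test_rng();
    let t = 3;
    let n = 8; // calc_f_from_btree asserts that the number of leaves is a power of two
    let betas = powers_of_beta(Fr::rand(&mut rng), t);
    let deltas = powers_of_beta(Fr::rand(&mut rng), t);
    let f_w: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();

    // F(X) built from the binary tree of (beta_j + X*delta_j) factors
    let F_X = calc_f_from_btree(&f_w, &betas, &deltas);

    // naive evaluation: fold betas + x*deltas into one vector and reuse pow_i
    let x = Fr::rand(&mut rng);
    let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));
    let mut expected = Fr::zero();
    for (i, f_w_i) in f_w.iter().enumerate() {
        expected += pow_i(i, &bxd) * *f_w_i;
    }
    assert_eq!(F_X.evaluate(&x), expected);
}

Each tree level j combines adjacent nodes as left + (β_j + X·δ_j)·right, so for a power-of-two number of leaves the root accumulates exactly the sum given in the note above.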
#[derive(Clone, Debug)]
pub struct R1CS<F: PrimeField> {
pub A: Vec<Vec<F>>,
@@ -445,29 +463,6 @@ mod tests {
}
}
#[test]
fn test_pow_i_over_x() {
let mut rng = ark_std::test_rng();
let t = 3;
let n = 8;
let beta = Fr::rand(&mut rng);
let delta = Fr::rand(&mut rng);
let betas = powers_of_beta(beta, t);
let deltas = powers_of_beta(delta, t);
// compute b + X*d, with X=rand
let x = Fr::rand(&mut rng);
let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));
// assert that computing pow_over_x of betas,deltas, is equivalent to first computing the
// vector [betas+X*deltas] and then computing pow_i over it
for i in 0..n {
let pow_i1 = pow_i_over_x(i, &betas, &deltas);
let pow_i2 = pow_i(i, &bxd);
assert_eq!(pow_i1.evaluate(&x), pow_i2);
}
}
#[test]
fn test_eval_f() {
let r1cs = get_test_r1cs::<Fr>();
