
chore: cleanup

main
Daniel Tehrani, 2 years ago
commit a465129225
13 changed files with 44 additions and 59 deletions
  1. +2  -1   Cargo.toml
  2. +1  -0   rust-toolchain
  3. +0  -1   shockwave_plus/src/lib.rs
  4. +3  -0   shockwave_plus/src/polynomial/ml_poly.rs
  5. +19 -2   shockwave_plus/src/r1cs/r1cs.rs
  6. +0  -20  shockwave_plus/src/utils.rs
  7. +15 -16  tensor_pcs/src/fft.rs
  8. +1  -0   tensor_pcs/src/lib.rs
  9. +1  -1   tensor_pcs/src/tensor_code.rs
  10. +1 -3   tensor_pcs/src/tensor_pcs.rs
  11. +1 -2   tensor_pcs/src/transcript.rs
  12. +0 -4   tensor_pcs/src/tree.rs
  13. +0 -9   tensor_pcs/src/utils.rs

+2 -1   Cargo.toml

@@ -2,4 +2,5 @@
 members = [
     "tensor_pcs",
     "shockwave_plus"
-]
+]
+resolver = "2"

+1 -0   rust-toolchain

@@ -0,0 +1 @@
+1.71.0

+0 -1   shockwave_plus/src/lib.rs

@@ -2,7 +2,6 @@
 mod polynomial;
 mod r1cs;
 mod sumcheck;
-mod utils;
 use ark_std::{end_timer, start_timer};
 use serde::{Deserialize, Serialize};

+3 -0   shockwave_plus/src/polynomial/ml_poly.rs

@@ -9,12 +9,14 @@ pub struct MlPoly {
 }

 impl<F: FieldExt> MlPoly<F> {
+    #[allow(dead_code)]
     pub fn new(evals: Vec<F>) -> Self {
         assert!(evals.len().is_power_of_two());
         let num_vars = (evals.len() as f64).log2() as usize;
         Self { evals, num_vars }
     }

+    #[allow(dead_code)]
     fn dot_prod(x: &[F], y: &[F]) -> F {
         assert_eq!(x.len(), y.len());
         let mut result = F::ZERO;
@@ -27,6 +29,7 @@ impl<F: FieldExt> MlPoly<F> {
     // Evaluate the multilinear extension of the polynomial `a`, at point `t`.
     // `a` is in evaluation form.
     // `t` should be in big-endian form.
+    #[allow(dead_code)]
     pub fn eval(&self, t: &[F]) -> F {
         let n = self.evals.len();
         debug_assert_eq!((n as f64).log2() as usize, t.len());
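For context, the eval method touched above computes the multilinear extension (MLE) of the stored evaluations at an arbitrary point, with the point given in big-endian order. Below is a minimal standalone sketch of that evaluation, using u64 arithmetic modulo a small prime in place of the crate's FieldExt type; the names mle_eval and P are illustrative and not part of the crate.

// Naive MLE evaluation over the integers mod P (P a small prime), big-endian
// point order: t[0] corresponds to the most significant bit of the index.
const P: u64 = 97;

fn mle_eval(evals: &[u64], t: &[u64]) -> u64 {
    let m = t.len();
    assert_eq!(evals.len(), 1usize << m);
    let mut acc = 0u64;
    for (i, &e) in evals.iter().enumerate() {
        // chi_i(t) = prod_j (t_j if bit_j(i) == 1 else 1 - t_j)
        let mut chi = 1u64;
        for j in 0..m {
            let bit = (i >> (m - 1 - j)) & 1;
            let factor = if bit == 1 { t[j] } else { (1 + P - t[j]) % P };
            chi = chi * factor % P;
        }
        acc = (acc + e * chi) % P;
    }
    acc
}

fn main() {
    let evals = vec![3, 5, 7, 11];
    // On a hypercube vertex the MLE reproduces the stored value:
    // the big-endian point (1, 0) is index 0b10 = 2.
    assert_eq!(mle_eval(&evals, &[1, 0]), evals[2]);
    // At a non-boolean point it interpolates: f(0, 2) = 3 + 2 * (5 - 3) = 7.
    assert_eq!(mle_eval(&evals, &[0, 2]), 7);
}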

+19 -2  shockwave_plus/src/r1cs/r1cs.rs

@@ -256,12 +256,29 @@ mod tests {
     use halo2curves::ff::Field;
-    use crate::utils::boolean_hypercube;
     use super::*;
     type F = halo2curves::secp256k1::Fp;
+    use crate::polynomial::ml_poly::MlPoly;
+
+    // Returns a vector of vectors of length m, where each vector is a boolean vector (big endian)
+    fn boolean_hypercube<F: FieldExt>(m: usize) -> Vec<Vec<F>> {
+        let n = 2usize.pow(m as u32);
+        let mut boolean_hypercube = Vec::<Vec<F>>::with_capacity(n);
+        for i in 0..n {
+            let mut tmp = Vec::with_capacity(m);
+            for j in 0..m {
+                let i_b = F::from((i >> j & 1) as u64);
+                tmp.push(i_b);
+            }
+            tmp.reverse();
+            boolean_hypercube.push(tmp);
+        }
+        boolean_hypercube
+    }
+
     #[test]
     fn test_r1cs() {
         let num_cons = 10;
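The boolean_hypercube helper inlined into the test module here (and deleted from shockwave_plus/src/utils.rs below) enumerates the points of {0,1}^m in big-endian order. A small standalone sketch of the same enumeration, with u64 standing in for the field element and boolean_hypercube_u64 as an illustrative name:

// Same enumeration as the diff above, but over plain u64 values.
fn boolean_hypercube_u64(m: usize) -> Vec<Vec<u64>> {
    let n = 2usize.pow(m as u32);
    let mut out = Vec::with_capacity(n);
    for i in 0..n {
        let mut point = Vec::with_capacity(m);
        for j in 0..m {
            point.push((i >> j & 1) as u64);
        }
        point.reverse(); // big-endian: most significant bit first
        out.push(point);
    }
    out
}

fn main() {
    // For m = 2 the points come out as [0,0], [0,1], [1,0], [1,1].
    assert_eq!(
        boolean_hypercube_u64(2),
        vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]]
    );
}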

+0 -20  shockwave_plus/src/utils.rs

@@ -1,20 +0,0 @@
-use crate::FieldExt;
-
-// Returns a vector of vectors of length m, where each vector is a boolean vector (big endian)
-pub fn boolean_hypercube<F: FieldExt>(m: usize) -> Vec<Vec<F>> {
-    let n = 2usize.pow(m as u32);
-    let mut boolean_hypercube = Vec::<Vec<F>>::with_capacity(n);
-    for i in 0..n {
-        let mut tmp = Vec::with_capacity(m);
-        for j in 0..m {
-            let i_b = F::from((i >> j & 1) as u64);
-            tmp.push(i_b);
-        }
-        tmp.reverse();
-        boolean_hypercube.push(tmp);
-    }
-    boolean_hypercube
-}

+15 -16  tensor_pcs/src/fft.rs

@@ -1,6 +1,4 @@
 use crate::FieldExt;
-use halo2curves::ff::Field;
-use std::vec;

 pub fn fft<F>(coeffs: &[F], domain: &[F]) -> Vec<F>
 where
@@ -11,7 +9,6 @@ where
         return coeffs.to_vec();
     }
-    // TODO: Just borrow the values
     // Split into evens and odds
     let L = coeffs
         .iter()
@@ -46,24 +43,26 @@ where
     return evals_L;
 }
-pub fn ifft<F: FieldExt + Field>(domain: &[F], evals: &[F]) -> Vec<F> {
-    let mut coeffs = vec![];
-    let len_mod_inv = F::from(domain.len() as u64).invert().unwrap();
-    let vals = fft(&evals, &domain);
-    coeffs.push(vals[0] * len_mod_inv);
-    for val in vals[1..].iter().rev() {
-        coeffs.push(*val * len_mod_inv);
-    }
-    coeffs
-}

 #[cfg(test)]
 mod tests {
     use halo2curves::ff::Field;
     use halo2curves::ff::PrimeField;
     use halo2curves::pasta::Fp;
+    // Test the fft function by running the inverse fft
+    fn ifft<F: FieldExt + Field>(domain: &[F], evals: &[F]) -> Vec<F> {
+        let mut coeffs = vec![];
+        let len_mod_inv = F::from(domain.len() as u64).invert().unwrap();
+        let vals = fft(&evals, &domain);
+        coeffs.push(vals[0] * len_mod_inv);
+        for val in vals[1..].iter().rev() {
+            coeffs.push(*val * len_mod_inv);
+        }
+        coeffs
+    }
     use super::*;
     #[test]
     fn test_fft_ifft() {
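The ifft helper shuffled around in this diff inverts the FFT without a separate inverse butterfly: over a multiplicative domain of size n, run the forward FFT on the evaluations, keep output 0, reverse the remaining outputs, and scale everything by n^{-1}. A self-contained sketch of that identity using a naive DFT over the integers mod 5 with 2 as a 4th root of unity; dft_mod_5 and the constants are illustrative, not the crate's API.

// Naive DFT over Z_5 on the domain [1, w, w^2, w^3] with w = 2 (2^4 = 16 = 1 mod 5).
const P: u64 = 5;
const W: u64 = 2;

fn dft_mod_5(input: &[u64]) -> Vec<u64> {
    let n = input.len() as u64;
    (0..n)
        .map(|k| {
            let mut acc = 0;
            let mut wkj = 1; // w^(k*j), updated incrementally
            for &x in input {
                acc = (acc + x * wkj) % P;
                wkj = wkj * W.pow(k as u32) % P;
            }
            acc
        })
        .collect()
}

fn main() {
    let coeffs = vec![1, 2, 3, 4];
    let evals = dft_mod_5(&coeffs);

    // Inverse: forward DFT again, keep output 0, reverse the rest, scale by n^-1.
    let vals = dft_mod_5(&evals);
    let n_inv = 4; // 4 * 4 = 16 = 1 mod 5
    let mut recovered = vec![vals[0] * n_inv % P];
    for v in vals[1..].iter().rev() {
        recovered.push(v * n_inv % P);
    }
    assert_eq!(recovered, coeffs);
}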

+1 -0   tensor_pcs/src/lib.rs

@@ -1,3 +1,4 @@
+#![allow(non_snake_case)]
 mod fft;
 mod polynomial;
 pub mod rs_config;

+1 -1   tensor_pcs/src/tensor_code.rs

@@ -32,7 +32,7 @@ pub struct CommittedTensorCode {
 }
 impl<F: FieldExt> CommittedTensorCode<F> {
-    pub fn query_column(&self, column: usize, num_cols: usize) -> Vec<F> {
+    pub fn query_column(&self, column: usize) -> Vec<F> {
         let num_rows = self.tensor_codeword.0.len();
         let leaves =

+1 -3   tensor_pcs/src/tensor_pcs.rs

@@ -297,14 +297,12 @@ impl TensorMultilinearPCS {
     }
     fn test_phase(&self, indices: &[usize], u_hat_comm: &CommittedTensorCode<F>) -> Vec<Vec<F>> {
-        let num_cols = self.config.num_cols() * 2;
         // Query the columns of u_hat
         let num_indices = self.config.l;
         let u_hat_openings = indices
             .iter()
-            .map(|index| u_hat_comm.query_column(*index, num_cols))
+            .map(|index| u_hat_comm.query_column(*index))
             .collect::<Vec<Vec<F>>>();
         debug_assert_eq!(u_hat_openings.len(), num_indices);
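The two hunks above drop the redundant num_cols argument: the committed codeword already carries its own dimensions, so a column query only needs the column index. A hypothetical minimal model of that column extraction over a plain row-major grid (query_column here operates on Vec<Vec<u64>>, not the crate's CommittedTensorCode):

// Row-major codeword grid; each inner Vec is one row, so the row length
// (the number of columns) is implicit and no num_cols argument is needed.
fn query_column(codeword: &[Vec<u64>], column: usize) -> Vec<u64> {
    codeword.iter().map(|row| row[column]).collect()
}

fn main() {
    let codeword = vec![vec![1, 2, 3], vec![4, 5, 6]];
    assert_eq!(query_column(&codeword, 1), vec![2, 5]);
}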

+1 -2   tensor_pcs/src/transcript.rs

@@ -1,7 +1,6 @@
 use crate::FieldExt;
 use halo2curves::ff::PrimeField;
 use merlin::Transcript as MerlinTranscript;
-use std::{io::Repeat, marker::PhantomData, panic::UnwindSafe};
+use std::marker::PhantomData;
 #[derive(Clone)]
 pub struct Transcript<F: FieldExt> {

+0 -4   tensor_pcs/src/tree.rs

@@ -1,7 +1,3 @@
-use core::num;
-use std::marker::PhantomData;
-use super::utils::hash_two;
 use crate::{utils::hash_all, FieldExt};
 use serde::{Deserialize, Serialize};

+0 -9   tensor_pcs/src/utils.rs

@@ -23,15 +23,6 @@ pub fn dot_prod(x: &[F], y: &[F]) -> F {
     result
 }
-pub fn hash_two(values: &[[u8; 32]; 2]) -> [u8; 32] {
-    let mut hasher = Keccak::v256();
-    hasher.update(&values[0]);
-    hasher.update(&values[1]);
-    let mut hash = [0u8; 32];
-    hasher.finalize(&mut hash);
-    hash
-}
 pub fn hash_all(values: &[[u8; 32]]) -> [u8; 32] {
     let mut hasher = Keccak::v256();
     for value in values {
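hash_two is deleted here, presumably because the retained hash_all applied to a two-element slice absorbs exactly the same bytes and therefore produces the same Keccak-256 digest. A quick standalone check of that equivalence, assuming the tiny_keccak crate (whose Keccak::v256/update/finalize API matches the code shown); hash_two_inline is an illustrative local name.

use tiny_keccak::{Hasher, Keccak};

// Same shape as the hash_all kept in utils.rs: absorb each 32-byte value, then squeeze.
fn hash_all(values: &[[u8; 32]]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    for value in values {
        hasher.update(value);
    }
    let mut hash = [0u8; 32];
    hasher.finalize(&mut hash);
    hash
}

// Inline equivalent of the deleted hash_two: two updates, one finalize.
fn hash_two_inline(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    hasher.update(a);
    hasher.update(b);
    let mut hash = [0u8; 32];
    hasher.finalize(&mut hash);
    hash
}

fn main() {
    let (a, b) = ([1u8; 32], [2u8; 32]);
    // Both paths absorb the same 64 bytes, so the digests match.
    assert_eq!(hash_two_inline(&a, &b), hash_all(&[a, b]));
}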
