From 20b437f842032767f1b52e6f89366cba79d758ca Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 6 Dec 2022 08:25:48 +0100 Subject: [PATCH 1/8] feat: blake3 benchmarks --- benches/hash.rs | 63 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/benches/hash.rs b/benches/hash.rs index 9460568..ef0680d 100644 --- a/benches/hash.rs +++ b/benches/hash.rs @@ -1,9 +1,13 @@ use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; use miden_crypto::{ - hash::rpo::{Rpo256, RpoDigest}, + hash::{ + blake::Blake3_256, + rpo::{Rpo256, RpoDigest}, + }, Felt, }; use rand_utils::rand_value; +use winter_crypto::Hasher; fn rpo256_2to1(c: &mut Criterion) { let v: [RpoDigest; 2] = [Rpo256::hash(&[1_u8]), Rpo256::hash(&[2_u8])]; @@ -53,5 +57,60 @@ fn rpo256_sequential(c: &mut Criterion) { }); } -criterion_group!(hash_group, rpo256_sequential, rpo256_2to1); +fn blake3_2to1(c: &mut Criterion) { + let v: [::Digest; 2] = + [Blake3_256::hash(&[1_u8]), Blake3_256::hash(&[2_u8])]; + c.bench_function("Blake3 2-to-1 hashing (cached)", |bench| { + bench.iter(|| Blake3_256::merge(black_box(&v))) + }); + + c.bench_function("Blake3 2-to-1 hashing (random)", |bench| { + bench.iter_batched( + || { + [ + Blake3_256::hash(&rand_value::().to_le_bytes()), + Blake3_256::hash(&rand_value::().to_le_bytes()), + ] + }, + |state| Blake3_256::merge(&state), + BatchSize::SmallInput, + ) + }); +} + +fn blake3_sequential(c: &mut Criterion) { + let v: [Felt; 100] = (0..100) + .into_iter() + .map(Felt::new) + .collect::>() + .try_into() + .expect("should not fail"); + c.bench_function("Blake3 sequential hashing (cached)", |bench| { + bench.iter(|| Blake3_256::hash_elements(black_box(&v))) + }); + + c.bench_function("Blake3 sequential hashing (random)", |bench| { + bench.iter_batched( + || { + let v: [Felt; 100] = (0..100) + .into_iter() + .map(|_| Felt::new(rand_value())) + 
.collect::>() + .try_into() + .expect("should not fail"); + v + }, + |state| Blake3_256::hash_elements(&state), + BatchSize::SmallInput, + ) + }); +} + +criterion_group!( + hash_group, + rpo256_2to1, + rpo256_sequential, + blake3_2to1, + blake3_sequential +); criterion_main!(hash_group); From 9782992662c1c01e89d0c55de220a1235877abdc Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Fri, 9 Dec 2022 13:51:16 -0800 Subject: [PATCH 2/8] feat: improve blake3 sequential hashing performance --- src/hash/blake/mod.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/hash/blake/mod.rs b/src/hash/blake/mod.rs index 6ffbfb5..b596ab9 100644 --- a/src/hash/blake/mod.rs +++ b/src/hash/blake/mod.rs @@ -276,13 +276,11 @@ where let digest = if Felt::IS_CANONICAL { blake3::hash(E::elements_as_bytes(elements)) } else { - E::as_base_elements(elements) - .iter() - .fold(blake3::Hasher::new(), |mut hasher, felt| { - hasher.update(&felt.as_int().to_le_bytes()); - hasher - }) - .finalize() + let mut hasher = blake3::Hasher::new(); + for element in E::as_base_elements(elements) { + hasher.update(&element.as_int().to_le_bytes()); + } + hasher.finalize() }; *shrink_bytes(&digest.into()) } From aa4e31369017c6093b171f4302c6908751b43575 Mon Sep 17 00:00:00 2001 From: Anjan Roy Date: Sat, 10 Dec 2022 11:48:19 +0400 Subject: [PATCH 3/8] chg: first convert all elements to little endian bytes and then consume them in a single call to blake3 hasher Signed-off-by: Anjan Roy --- src/hash/blake/mod.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/hash/blake/mod.rs b/src/hash/blake/mod.rs index b596ab9..e20391b 100644 --- a/src/hash/blake/mod.rs +++ b/src/hash/blake/mod.rs @@ -276,10 +276,15 @@ where let digest = if Felt::IS_CANONICAL { blake3::hash(E::elements_as_bytes(elements)) } else { - let mut hasher = blake3::Hasher::new(); - for element in E::as_base_elements(elements) { - hasher.update(&element.as_int().to_le_bytes()); + 
let blen = elements.len() << 3; + let mut bytes = vec![0u8; blen]; + + for (idx, element) in E::as_base_elements(elements).iter().enumerate() { + bytes[idx * 8..(idx + 1) * 8].copy_from_slice(&element.as_int().to_le_bytes()); } + + let mut hasher = blake3::Hasher::new(); + hasher.update(&bytes); hasher.finalize() }; *shrink_bytes(&digest.into()) From 0d713af4acc120f40259b2a0faaf0b67c5a794e2 Mon Sep 17 00:00:00 2001 From: Anjan Roy Date: Sat, 10 Dec 2022 12:49:38 +0400 Subject: [PATCH 4/8] chg: don't assume that default features are available on all targets Signed-off-by: Anjan Roy --- src/hash/blake/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/hash/blake/mod.rs b/src/hash/blake/mod.rs index e20391b..c1ea386 100644 --- a/src/hash/blake/mod.rs +++ b/src/hash/blake/mod.rs @@ -5,6 +5,7 @@ use core::{ ops::Deref, slice::from_raw_parts, }; +use winter_utils::collections::Vec; #[cfg(test)] mod tests; @@ -277,7 +278,12 @@ where blake3::hash(E::elements_as_bytes(elements)) } else { let blen = elements.len() << 3; - let mut bytes = vec![0u8; blen]; + + let mut bytes = Vec::with_capacity(blen); + #[allow(clippy::uninit_vec)] + unsafe { + bytes.set_len(blen) + } for (idx, element) in E::as_base_elements(elements).iter().enumerate() { bytes[idx * 8..(idx + 1) * 8].copy_from_slice(&element.as_int().to_le_bytes()); From b4f9d6098170a70ff334e0f638d7540608176c60 Mon Sep 17 00:00:00 2001 From: Anjan Roy Date: Mon, 12 Dec 2022 09:49:33 +0400 Subject: [PATCH 5/8] chg: don't assume we're only working with base field elements, consider extension field elements too See https://github.com/0xPolygonMiden/crypto/pull/29#discussion_r1045108928 where it was suggested. 
Signed-off-by: Anjan Roy --- src/hash/blake/mod.rs | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/hash/blake/mod.rs b/src/hash/blake/mod.rs index c1ea386..488c8ba 100644 --- a/src/hash/blake/mod.rs +++ b/src/hash/blake/mod.rs @@ -1,11 +1,12 @@ use super::{Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField}; -use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; +use crate::utils::{ + uninit_vector, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, +}; use core::{ mem::{size_of, transmute, transmute_copy}, ops::Deref, slice::from_raw_parts, }; -use winter_utils::collections::Vec; #[cfg(test)] mod tests; @@ -277,21 +278,15 @@ where let digest = if Felt::IS_CANONICAL { blake3::hash(E::elements_as_bytes(elements)) } else { - let blen = elements.len() << 3; - - let mut bytes = Vec::with_capacity(blen); - #[allow(clippy::uninit_vec)] - unsafe { - bytes.set_len(blen) - } + let base_elements = E::as_base_elements(elements); + let blen = base_elements.len() << 3; - for (idx, element) in E::as_base_elements(elements).iter().enumerate() { + let mut bytes = unsafe { uninit_vector(blen) }; + for (idx, element) in base_elements.iter().enumerate() { bytes[idx * 8..(idx + 1) * 8].copy_from_slice(&element.as_int().to_le_bytes()); } - let mut hasher = blake3::Hasher::new(); - hasher.update(&bytes); - hasher.finalize() + blake3::hash(&bytes) }; *shrink_bytes(&digest.into()) } From c728423902a173a695c93cafc5b80e5612b7c2b2 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Thu, 8 Dec 2022 11:41:23 +0100 Subject: [PATCH 6/8] doc: benchmark hash functions doc: benchmark hash functions fix nits fix: misc. 
nits fix: nits and link fix additional nits fix: nits --- README.md | 8 ++++---- benches/README.md | 52 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 benches/README.md diff --git a/README.md b/README.md index b2edddd..1527df0 100644 --- a/README.md +++ b/README.md @@ -2,24 +2,24 @@ This crate contains cryptographic primitives used in Polygon Miden. ## Hash -[Hash module](./src/hash) provides a set of cryptographic hash functions which are used by Miden VM and Miden Rollup. Currently, these functions are: +[Hash module](./src/hash) provides a set of cryptographic hash functions which are used by the Miden VM and the Miden rollup. Currently, these functions are: * [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3. * [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs. ## Merkle -[Merkle module](./src/merkle/) provides a set of data structures related to Merkle tree. All these data structures are implemented using RPO hash function described above. The data structure are: +[Merkle module](./src/merkle/) provides a set of data structures related to Merkle trees. All these data structures are implemented using the RPO hash function described above. The data structures are: * `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64. * `MerklePathSet`: a collection of Merkle authentication paths all resolving to the same root. The length of the paths can be at most 64. ## Crate features -This carate can be compiled with the following features: +This crate can be compiled with the following features: * `std` - enabled by default and relies on the Rust standard library. 
* `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly. -Both of these features imply use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections. +Both of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections. To compile with `no_std`, disable default features via `--no-default-features` flag. diff --git a/benches/README.md b/benches/README.md new file mode 100644 index 0000000..859ad73 --- /dev/null +++ b/benches/README.md @@ -0,0 +1,52 @@ +# Miden VM Hash Functions +In the Miden VM, we make use of different hash functions. Some of these are "traditional" hash functions, like `BLAKE3`, which are optimized for out-of-STARK performance, while others are algebraic hash functions, like `Rescue Prime`, and are more optimized for a better performance inside the STARK. In what follows, we benchmark several such hash functions and compare against other constructions that are used by other proving systems. More precisely, we benchmark: + +* **Rescue Prime:** +As specified [here](https://eprint.iacr.org/2020/1143) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/rescue/rp64_256/mod.rs). + +* **Rescue Prime Optimized:** +As specified [here](https://eprint.iacr.org/2022/1577) and implemented in this crate. + +* **BLAKE3:** +As specified [here](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) and implemented in this crate. + +* **SHA3:** +As specified [here](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/sha/mod.rs). + +* **Poseidon:** +As specified [here](https://eprint.iacr.org/2019/458.pdf) and implemented (in pure Rust, without vectorized instructions) [here](https://github.com/mir-protocol/plonky2/blob/main/plonky2/src/hash/poseidon_goldilocks.rs). 
+ +## Comparison and Instructions + +### Comparison +We benchmark the above hash functions using two scenarios. The first is a 2-to-1 $(a,b)\mapsto h(a,b)$ hashing where both $a$, $b$ and $h(a,b)$ are the digests corresponding to each of the hash functions. +The second scenario is that of sequential hashing where we take a sequence of length $100$ field elements and hash these to produce a single digest. The digests are $4$ field elements (i.e. 256-bit) for Poseidon, Rescue Prime and RPO, and an array `[u8;32]` for SHA3 and BLAKE3. + +#### Scenario 1: 2-to-1 hashing `h(a,b)` + +| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | +| ----------------- | ------ | --------| --------- | --------- | ------- | +| Apple M1 Pro | 80 ns | 245 ns | 1.3 us | 9.1 us | 5.4 us | +| Apple M2 | 76 ns | 233 ns | 1.2 us | 7.9 us | 5.0 us | +| AMD Ryzen 9 5950X | 64 ns | 273 ns | 1.2 us | 9.1 us | 5.5 us | + +#### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])` + +| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | +| ----------------- | -------| ------- | --------- | --------- | ------- | +| Apple M1 Pro | 1.1 us | 1.5 us | 17.3 us | 118 us | 70 us | +| Apple M2 | 1.0 us | 1.5 us | 15.5 us | 103 us | 65 us | +| AMD Ryzen 9 5950X | 0.8 us | 1.7 us | 15.7 us | 120 us | 72 us | + +### Instructions +Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). 
After that, to run the benchmarks for RPO and BLAKE3, clone the current repository, and from the root directory of the repo run the following: + + ``` + cargo bench --bench hash + ``` + +To run the benchmarks for Rescue Prime, Poseidon and SHA3, clone the following [repository](https://github.com/Dominik1999/winterfell.git) as above, then checkout the `hash-functions-benches` branch, and from the root directory of the repo run the following: + +``` +cargo bench --bench hash +``` \ No newline at end of file From 5fd0d692e8c4af6612ffc291abc261ae2bcc7337 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Thu, 8 Dec 2022 17:13:17 +0100 Subject: [PATCH 7/8] feat: add simple sparse merkle tree This commit moves the previous implementation of `SparseMerkleTree` from miden-core to this crate. It also include a couple of new tests, a bench suite, and a couple of minor fixes. The original API was preserved to maintain compatibility with `AdviceTape`. closes #21 --- Cargo.toml | 6 +- benches/smt.rs | 84 ++++++++++ src/lib.rs | 5 +- src/merkle/merkle_tree.rs | 13 +- src/merkle/mod.rs | 36 ++++- src/merkle/simple_smt/mod.rs | 269 +++++++++++++++++++++++++++++++++ src/merkle/simple_smt/tests.rs | 263 ++++++++++++++++++++++++++++++++ 7 files changed, 666 insertions(+), 10 deletions(-) create mode 100644 benches/smt.rs create mode 100644 src/merkle/simple_smt/mod.rs create mode 100644 src/merkle/simple_smt/tests.rs diff --git a/Cargo.toml b/Cargo.toml index dc7367b..455dddc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,10 @@ edition = "2021" name = "hash" harness = false +[[bench]] +name = "smt" +harness = false + [features] default = ["blake3/default", "std", "winter_crypto/default", "winter_math/default", "winter_utils/default"] std = ["blake3/std", "winter_crypto/std", "winter_math/std", "winter_utils/std"] @@ -25,6 +29,6 @@ winter_math = { version = "0.4.1", package = "winter-math", default-features = f winter_utils = { version = "0.4.1", package = "winter-utils", 
default-features = false } [dev-dependencies] -criterion = "0.4" +criterion = { version = "0.4", features = ["html_reports"] } proptest = "1.0.0" rand_utils = { version = "0.4", package = "winter-rand-utils" } diff --git a/benches/smt.rs b/benches/smt.rs new file mode 100644 index 0000000..c03431f --- /dev/null +++ b/benches/smt.rs @@ -0,0 +1,84 @@ +use core::mem::swap; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use miden_crypto::{merkle::SimpleSmt, Felt, Word}; +use rand_utils::prng_array; + +fn smt_rpo(c: &mut Criterion) { + // setup trees + + let mut seed = [0u8; 32]; + let mut trees = vec![]; + + for depth in 14..=20 { + let leaves = ((1 << depth) - 1) as u64; + for count in [1, leaves / 2, leaves] { + let entries: Vec<_> = (0..count) + .map(|i| { + let word = generate_word(&mut seed); + (i, word) + }) + .collect(); + let tree = SimpleSmt::new(entries, depth).unwrap(); + trees.push(tree); + } + } + + let leaf = generate_word(&mut seed); + + // benchmarks + + let mut insert = c.benchmark_group(format!("smt update_leaf")); + + for tree in trees.iter_mut() { + let depth = tree.depth(); + let count = tree.leaves_count() as u64; + let key = count >> 2; + insert.bench_with_input( + format!("simple smt(depth:{depth},count:{count})"), + &(key, leaf), + |b, (key, leaf)| { + b.iter(|| { + tree.update_leaf(black_box(*key), black_box(*leaf)).unwrap(); + }); + }, + ); + } + + insert.finish(); + + let mut path = c.benchmark_group(format!("smt get_leaf_path")); + + for tree in trees.iter_mut() { + let depth = tree.depth(); + let count = tree.leaves_count() as u64; + let key = count >> 2; + path.bench_with_input( + format!("simple smt(depth:{depth},count:{count})"), + &key, + |b, key| { + b.iter(|| { + tree.get_leaf_path(black_box(*key)).unwrap(); + }); + }, + ); + } + + path.finish(); +} + +criterion_group!(smt_group, smt_rpo); +criterion_main!(smt_group); + +// HELPER FUNCTIONS +// 
-------------------------------------------------------------------------------------------- + +fn generate_word(seed: &mut [u8; 32]) -> Word { + swap(seed, &mut prng_array(*seed)); + let nums: [u64; 4] = prng_array(*seed); + [ + Felt::new(nums[0]), + Felt::new(nums[1]), + Felt::new(nums[2]), + Felt::new(nums[3]), + ] +} diff --git a/src/lib.rs b/src/lib.rs index ff2eb43..ce650c6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,11 +23,14 @@ pub mod utils { // ================================================================================================ /// A group of four field elements in the Miden base field. -pub type Word = [Felt; 4]; +pub type Word = [Felt; WORD_SIZE]; // CONSTANTS // ================================================================================================ +/// Number of field elements in a word. +pub const WORD_SIZE: usize = 4; + /// Field element representing ZERO in the Miden base filed. pub const ZERO: Felt = Felt::ZERO; diff --git a/src/merkle/merkle_tree.rs b/src/merkle/merkle_tree.rs index 8763d96..c0c381c 100644 --- a/src/merkle/merkle_tree.rs +++ b/src/merkle/merkle_tree.rs @@ -1,4 +1,4 @@ -use super::{Digest, Felt, MerkleError, Rpo256, Vec, Word}; +use super::{Felt, MerkleError, Rpo256, RpoDigest, Vec, Word}; use crate::{utils::uninit_vector, FieldElement}; use core::slice; use winter_math::log2; @@ -22,7 +22,7 @@ impl MerkleTree { pub fn new(leaves: Vec) -> Result { let n = leaves.len(); if n <= 1 { - return Err(MerkleError::DepthTooSmall); + return Err(MerkleError::DepthTooSmall(n as u32)); } else if !n.is_power_of_two() { return Err(MerkleError::NumLeavesNotPowerOfTwo(n)); } @@ -35,7 +35,8 @@ impl MerkleTree { nodes[n..].copy_from_slice(&leaves); // re-interpret nodes as an array of two nodes fused together - let two_nodes = unsafe { slice::from_raw_parts(nodes.as_ptr() as *const [Digest; 2], n) }; + let two_nodes = + unsafe { slice::from_raw_parts(nodes.as_ptr() as *const [RpoDigest; 2], n) }; // calculate all internal 
tree nodes for i in (1..n).rev() { @@ -68,7 +69,7 @@ impl MerkleTree { /// * The specified index not valid for the specified depth. pub fn get_node(&self, depth: u32, index: u64) -> Result { if depth == 0 { - return Err(MerkleError::DepthTooSmall); + return Err(MerkleError::DepthTooSmall(depth)); } else if depth > self.depth() { return Err(MerkleError::DepthTooBig(depth)); } @@ -89,7 +90,7 @@ impl MerkleTree { /// * The specified index not valid for the specified depth. pub fn get_path(&self, depth: u32, index: u64) -> Result, MerkleError> { if depth == 0 { - return Err(MerkleError::DepthTooSmall); + return Err(MerkleError::DepthTooSmall(depth)); } else if depth > self.depth() { return Err(MerkleError::DepthTooBig(depth)); } @@ -123,7 +124,7 @@ impl MerkleTree { let n = self.nodes.len() / 2; let two_nodes = - unsafe { slice::from_raw_parts(self.nodes.as_ptr() as *const [Digest; 2], n) }; + unsafe { slice::from_raw_parts(self.nodes.as_ptr() as *const [RpoDigest; 2], n) }; for _ in 0..depth { index /= 2; diff --git a/src/merkle/mod.rs b/src/merkle/mod.rs index 1b137b9..87cd80f 100644 --- a/src/merkle/mod.rs +++ b/src/merkle/mod.rs @@ -1,8 +1,9 @@ use super::{ - hash::rpo::{Rpo256, RpoDigest as Digest}, + hash::rpo::{Rpo256, RpoDigest}, utils::collections::{BTreeMap, Vec}, Felt, Word, ZERO, }; +use core::fmt; mod merkle_tree; pub use merkle_tree::MerkleTree; @@ -10,20 +11,51 @@ pub use merkle_tree::MerkleTree; mod merkle_path_set; pub use merkle_path_set::MerklePathSet; +mod simple_smt; +pub use simple_smt::SimpleSmt; + // ERRORS // ================================================================================================ #[derive(Clone, Debug)] pub enum MerkleError { - DepthTooSmall, + DepthTooSmall(u32), DepthTooBig(u32), NumLeavesNotPowerOfTwo(usize), InvalidIndex(u32, u64), InvalidDepth(u32, u32), InvalidPath(Vec), + InvalidEntriesCount(usize, usize), NodeNotInSet(u64), } +impl fmt::Display for MerkleError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + use MerkleError::*; + match self { + DepthTooSmall(depth) => write!(f, "the provided depth {depth} is too small"), + DepthTooBig(depth) => write!(f, "the provided depth {depth} is too big"), + NumLeavesNotPowerOfTwo(leaves) => { + write!(f, "the leaves count {leaves} is not a power of 2") + } + InvalidIndex(depth, index) => write!( + f, + "the leaf index {index} is not valid for the depth {depth}" + ), + InvalidDepth(expected, provided) => write!( + f, + "the provided depth {provided} is not valid for {expected}" + ), + InvalidPath(_path) => write!(f, "the provided path is not valid"), + InvalidEntriesCount(max, provided) => write!(f, "the provided number of entries is {provided}, but the maximum for the given depth is {max}"), + NodeNotInSet(index) => write!(f, "the node indexed by {index} is not in the set"), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for MerkleError {} + // HELPER FUNCTIONS // ================================================================================================ diff --git a/src/merkle/simple_smt/mod.rs b/src/merkle/simple_smt/mod.rs new file mode 100644 index 0000000..8de3103 --- /dev/null +++ b/src/merkle/simple_smt/mod.rs @@ -0,0 +1,269 @@ +use super::{BTreeMap, MerkleError, Rpo256, RpoDigest, Vec, Word}; + +#[cfg(test)] +mod tests; + +// SPARSE MERKLE TREE +// ================================================================================================ + +/// A sparse Merkle tree with 63-bit keys and 4-element leaf values, without compaction. +/// Manipulation and retrieval of leaves and internal nodes is provided by its internal `Store`. +/// The root of the tree is recomputed on each new leaf update. +#[derive(Clone, Debug)] +pub struct SimpleSmt { + root: Word, + depth: u32, + store: Store, +} + +impl SimpleSmt { + // CONSTANTS + // -------------------------------------------------------------------------------------------- + + /// Minimum supported depth. 
+ pub const MIN_DEPTH: u32 = 1; + + /// Maximum supported depth. + pub const MAX_DEPTH: u32 = 63; + + // CONSTRUCTORS + // -------------------------------------------------------------------------------------------- + + /// Creates a new simple SMT. + /// + /// The provided entries will be tuples of the leaves and their corresponding keys. + /// + /// # Errors + /// + /// The function will fail if the provided entries count exceed the maximum tree capacity, that + /// is `2^{depth}`. + pub fn new(entries: R, depth: u32) -> Result + where + R: IntoIterator, + I: Iterator + ExactSizeIterator, + { + let mut entries = entries.into_iter(); + + // validate the range of the depth. + let max = 1 << depth; + if depth < Self::MIN_DEPTH { + return Err(MerkleError::DepthTooSmall(depth)); + } else if Self::MAX_DEPTH < depth { + return Err(MerkleError::DepthTooBig(depth)); + } else if entries.len() > max { + return Err(MerkleError::InvalidEntriesCount(max, entries.len())); + } + + let (store, root) = Store::new(depth); + let mut tree = Self { root, depth, store }; + entries.try_for_each(|(key, leaf)| tree.insert_leaf(key, leaf))?; + + Ok(tree) + } + + /// Returns the root of this Merkle tree. + pub const fn root(&self) -> Word { + self.root + } + + /// Returns the depth of this Merkle tree. + pub const fn depth(&self) -> u32 { + self.depth + } + + /// Returns the set count of the keys of the leaves. + pub fn leaves_count(&self) -> usize { + self.store.leaves_count() + } + + /// Returns a node at the specified key + /// + /// # Errors + /// Returns an error if: + /// * The specified depth is greater than the depth of the tree. 
+ /// * The specified key does not exist + pub fn get_node(&self, depth: u32, key: u64) -> Result { + if depth == 0 { + Err(MerkleError::DepthTooSmall(depth)) + } else if depth > self.depth() { + Err(MerkleError::DepthTooBig(depth)) + } else if depth == self.depth() { + self.store.get_leaf_node(key) + } else { + let branch_node = self.store.get_branch_node(key, depth)?; + Ok(Rpo256::merge(&[branch_node.left, branch_node.right]).into()) + } + } + + /// Returns a Merkle path from the node at the specified key to the root. The node itself is + /// not included in the path. + /// + /// # Errors + /// Returns an error if: + /// * The specified key does not exist as a branch or leaf node + /// * The specified depth is greater than the depth of the tree. + pub fn get_path(&self, depth: u32, key: u64) -> Result, MerkleError> { + if depth == 0 { + return Err(MerkleError::DepthTooSmall(depth)); + } else if depth > self.depth() { + return Err(MerkleError::DepthTooBig(depth)); + } else if depth == self.depth() && !self.store.check_leaf_node_exists(key) { + return Err(MerkleError::InvalidIndex(self.depth(), key)); + } + + let mut path = Vec::with_capacity(depth as usize); + let mut curr_key = key; + for n in (0..depth).rev() { + let parent_key = curr_key >> 1; + let parent_node = self.store.get_branch_node(parent_key, n)?; + let sibling_node = if curr_key & 1 == 1 { + parent_node.left + } else { + parent_node.right + }; + path.push(sibling_node.into()); + curr_key >>= 1; + } + Ok(path) + } + + /// Return a Merkle path from the leaf at the specified key to the root. The leaf itself is not + /// included in the path. + /// + /// # Errors + /// Returns an error if: + /// * The specified key does not exist as a leaf node. 
+ pub fn get_leaf_path(&self, key: u64) -> Result, MerkleError> { + self.get_path(self.depth(), key) + } + + /// Replaces the leaf located at the specified key, and recomputes hashes by walking up the tree + /// + /// # Errors + /// Returns an error if the specified key is not a valid leaf index for this tree. + pub fn update_leaf(&mut self, key: u64, value: Word) -> Result<(), MerkleError> { + if !self.store.check_leaf_node_exists(key) { + return Err(MerkleError::InvalidIndex(self.depth(), key)); + } + self.insert_leaf(key, value)?; + + Ok(()) + } + + /// Inserts a leaf located at the specified key, and recomputes hashes by walking up the tree + pub fn insert_leaf(&mut self, key: u64, value: Word) -> Result<(), MerkleError> { + self.store.insert_leaf_node(key, value); + + let depth = self.depth(); + let mut curr_key = key; + let mut curr_node: RpoDigest = value.into(); + for n in (0..depth).rev() { + let parent_key = curr_key >> 1; + let parent_node = self + .store + .get_branch_node(parent_key, n) + .unwrap_or_else(|_| self.store.get_empty_node((n + 1) as usize)); + let (left, right) = if curr_key & 1 == 1 { + (parent_node.left, curr_node) + } else { + (curr_node, parent_node.right) + }; + + self.store.insert_branch_node(parent_key, n, left, right); + curr_key = parent_key; + curr_node = Rpo256::merge(&[left, right]); + } + self.root = curr_node.into(); + + Ok(()) + } +} + +// STORE +// ================================================================================================ + +/// A data store for sparse Merkle tree key-value pairs. +/// Leaves and branch nodes are stored separately in B-tree maps, indexed by key and (key, depth) +/// respectively. Hashes for blank subtrees at each layer are stored in `empty_hashes`, beginning +/// with the root hash of an empty tree, and ending with the zero value of a leaf node. 
+#[derive(Clone, Debug)] +struct Store { + branches: BTreeMap<(u64, u32), BranchNode>, + leaves: BTreeMap, + empty_hashes: Vec, + depth: u32, +} + +#[derive(Clone, Debug, Default)] +struct BranchNode { + left: RpoDigest, + right: RpoDigest, +} + +impl Store { + fn new(depth: u32) -> (Self, Word) { + let branches = BTreeMap::new(); + let leaves = BTreeMap::new(); + + // Construct empty node digests for each layer of the tree + let empty_hashes: Vec = (0..depth + 1) + .scan(Word::default().into(), |state, _| { + let value = *state; + *state = Rpo256::merge(&[value, value]); + Some(value) + }) + .collect::>() + .into_iter() + .rev() + .collect(); + + let root = empty_hashes[0].into(); + let store = Self { + branches, + leaves, + empty_hashes, + depth, + }; + + (store, root) + } + + fn get_empty_node(&self, depth: usize) -> BranchNode { + let digest = self.empty_hashes[depth]; + BranchNode { + left: digest, + right: digest, + } + } + + fn check_leaf_node_exists(&self, key: u64) -> bool { + self.leaves.contains_key(&key) + } + + fn get_leaf_node(&self, key: u64) -> Result { + self.leaves + .get(&key) + .cloned() + .ok_or(MerkleError::InvalidIndex(self.depth, key)) + } + + fn insert_leaf_node(&mut self, key: u64, node: Word) { + self.leaves.insert(key, node); + } + + fn get_branch_node(&self, key: u64, depth: u32) -> Result { + self.branches + .get(&(key, depth)) + .cloned() + .ok_or(MerkleError::InvalidIndex(depth, key)) + } + + fn insert_branch_node(&mut self, key: u64, depth: u32, left: RpoDigest, right: RpoDigest) { + let node = BranchNode { left, right }; + self.branches.insert((key, depth), node); + } + + fn leaves_count(&self) -> usize { + self.leaves.len() + } +} diff --git a/src/merkle/simple_smt/tests.rs b/src/merkle/simple_smt/tests.rs new file mode 100644 index 0000000..7042d1b --- /dev/null +++ b/src/merkle/simple_smt/tests.rs @@ -0,0 +1,263 @@ +use super::{ + super::{MerkleTree, RpoDigest, SimpleSmt}, + Rpo256, Vec, Word, +}; +use crate::{Felt, 
FieldElement}; +use core::iter; +use proptest::prelude::*; +use rand_utils::prng_array; + +const KEYS4: [u64; 4] = [0, 1, 2, 3]; +const KEYS8: [u64; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; + +const VALUES4: [Word; 4] = [ + int_to_node(1), + int_to_node(2), + int_to_node(3), + int_to_node(4), +]; + +const VALUES8: [Word; 8] = [ + int_to_node(1), + int_to_node(2), + int_to_node(3), + int_to_node(4), + int_to_node(5), + int_to_node(6), + int_to_node(7), + int_to_node(8), +]; + +const ZERO_VALUES8: [Word; 8] = [int_to_node(0); 8]; + +#[test] +fn build_empty_tree() { + let smt = SimpleSmt::new(iter::empty(), 3).unwrap(); + let mt = MerkleTree::new(ZERO_VALUES8.to_vec()).unwrap(); + assert_eq!(mt.root(), smt.root()); +} + +#[test] +fn empty_digests_are_consistent() { + let depth = 5; + let root = SimpleSmt::new(iter::empty(), depth).unwrap().root(); + let computed: [RpoDigest; 2] = (0..depth).fold([Default::default(); 2], |state, _| { + let digest = Rpo256::merge(&state); + [digest; 2] + }); + + assert_eq!(Word::from(computed[0]), root); +} + +#[test] +fn build_sparse_tree() { + let mut smt = SimpleSmt::new(iter::empty(), 3).unwrap(); + let mut values = ZERO_VALUES8.to_vec(); + + // insert single value + let key = 6; + let new_node = int_to_node(7); + values[key as usize] = new_node; + smt.insert_leaf(key, new_node) + .expect("Failed to insert leaf"); + let mt2 = MerkleTree::new(values.clone()).unwrap(); + assert_eq!(mt2.root(), smt.root()); + assert_eq!(mt2.get_path(3, 6).unwrap(), smt.get_path(3, 6).unwrap()); + + // insert second value at distinct leaf branch + let key = 2; + let new_node = int_to_node(3); + values[key as usize] = new_node; + smt.insert_leaf(key, new_node) + .expect("Failed to insert leaf"); + let mt3 = MerkleTree::new(values).unwrap(); + assert_eq!(mt3.root(), smt.root()); + assert_eq!(mt3.get_path(3, 2).unwrap(), smt.get_path(3, 2).unwrap()); +} + +#[test] +fn build_full_tree() { + let tree = SimpleSmt::new(KEYS4.into_iter().zip(VALUES4.into_iter()), 
2).unwrap(); + + let (root, node2, node3) = compute_internal_nodes(); + assert_eq!(root, tree.root()); + assert_eq!(node2, tree.get_node(1, 0).unwrap()); + assert_eq!(node3, tree.get_node(1, 1).unwrap()); +} + +#[test] +fn get_values() { + let tree = SimpleSmt::new(KEYS4.into_iter().zip(VALUES4.into_iter()), 2).unwrap(); + + // check depth 2 + assert_eq!(VALUES4[0], tree.get_node(2, 0).unwrap()); + assert_eq!(VALUES4[1], tree.get_node(2, 1).unwrap()); + assert_eq!(VALUES4[2], tree.get_node(2, 2).unwrap()); + assert_eq!(VALUES4[3], tree.get_node(2, 3).unwrap()); +} + +#[test] +fn get_path() { + let tree = SimpleSmt::new(KEYS4.into_iter().zip(VALUES4.into_iter()), 2).unwrap(); + + let (_, node2, node3) = compute_internal_nodes(); + + // check depth 2 + assert_eq!(vec![VALUES4[1], node3], tree.get_path(2, 0).unwrap()); + assert_eq!(vec![VALUES4[0], node3], tree.get_path(2, 1).unwrap()); + assert_eq!(vec![VALUES4[3], node2], tree.get_path(2, 2).unwrap()); + assert_eq!(vec![VALUES4[2], node2], tree.get_path(2, 3).unwrap()); + + // check depth 1 + assert_eq!(vec![node3], tree.get_path(1, 0).unwrap()); + assert_eq!(vec![node2], tree.get_path(1, 1).unwrap()); +} + +#[test] +fn update_leaf() { + let mut tree = SimpleSmt::new(KEYS8.into_iter().zip(VALUES8.into_iter()), 3).unwrap(); + + // update one value + let key = 3; + let new_node = int_to_node(9); + let mut expected_values = VALUES8.to_vec(); + expected_values[key] = new_node; + let expected_tree = SimpleSmt::new( + KEYS8.into_iter().zip(expected_values.clone().into_iter()), + 3, + ) + .unwrap(); + + tree.update_leaf(key as u64, new_node).unwrap(); + assert_eq!(expected_tree.root, tree.root); + + // update another value + let key = 6; + let new_node = int_to_node(10); + expected_values[key] = new_node; + let expected_tree = + SimpleSmt::new(KEYS8.into_iter().zip(expected_values.into_iter()), 3).unwrap(); + + tree.update_leaf(key as u64, new_node).unwrap(); + assert_eq!(expected_tree.root, tree.root); +} + +#[test] +fn 
small_tree_opening_is_consistent() { + // ____k____ + // / \ + // _i_ _j_ + // / \ / \ + // e f g h + // / \ / \ / \ / \ + // a b 0 0 c 0 0 d + + let z = Word::from(RpoDigest::default()); + + let a = Word::from(Rpo256::merge(&[z.into(); 2])); + let b = Word::from(Rpo256::merge(&[a.into(); 2])); + let c = Word::from(Rpo256::merge(&[b.into(); 2])); + let d = Word::from(Rpo256::merge(&[c.into(); 2])); + + let e = Word::from(Rpo256::merge(&[a.into(), b.into()])); + let f = Word::from(Rpo256::merge(&[z.into(), z.into()])); + let g = Word::from(Rpo256::merge(&[c.into(), z.into()])); + let h = Word::from(Rpo256::merge(&[z.into(), d.into()])); + + let i = Word::from(Rpo256::merge(&[e.into(), f.into()])); + let j = Word::from(Rpo256::merge(&[g.into(), h.into()])); + + let k = Word::from(Rpo256::merge(&[i.into(), j.into()])); + + let depth = 3; + let entries = vec![(0, a), (1, b), (4, c), (7, d)]; + let tree = SimpleSmt::new(entries, depth).unwrap(); + + assert_eq!(tree.root(), Word::from(k)); + + let cases: Vec<(u32, u64, Vec)> = vec![ + (3, 0, vec![b, f, j]), + (3, 1, vec![a, f, j]), + (3, 4, vec![z, h, i]), + (3, 7, vec![z, g, i]), + (2, 0, vec![f, j]), + (2, 1, vec![e, j]), + (2, 2, vec![h, i]), + (2, 3, vec![g, i]), + (1, 0, vec![j]), + (1, 1, vec![i]), + ]; + + for (depth, key, path) in cases { + let opening = tree.get_path(depth, key).unwrap(); + + assert_eq!(path, opening); + } +} + +proptest! 
{ + #[test] + fn arbitrary_openings_single_leaf( + depth in SimpleSmt::MIN_DEPTH..SimpleSmt::MAX_DEPTH, + key in prop::num::u64::ANY, + leaf in prop::num::u64::ANY, + ) { + let mut tree = SimpleSmt::new(iter::empty(), depth).unwrap(); + + let key = key % (1 << depth as u64); + let leaf = int_to_node(leaf); + + tree.insert_leaf(key, leaf.into()).unwrap(); + tree.get_leaf_path(key).unwrap(); + + // traverse to root, fetching all paths + for d in 1..depth { + let k = key >> (depth - d); + tree.get_path(d, k).unwrap(); + } + } + + #[test] + fn arbitrary_openings_multiple_leaves( + depth in SimpleSmt::MIN_DEPTH..SimpleSmt::MAX_DEPTH, + count in 2u8..10u8, + ref seed in any::<[u8; 32]>() + ) { + let mut tree = SimpleSmt::new(iter::empty(), depth).unwrap(); + let mut seed = *seed; + let leaves = (1 << depth) - 1; + + for _ in 0..count { + seed = prng_array(seed); + + let mut key = [0u8; 8]; + let mut leaf = [0u8; 8]; + + key.copy_from_slice(&seed[..8]); + leaf.copy_from_slice(&seed[8..16]); + + let key = u64::from_le_bytes(key); + let key = key % leaves; + let leaf = u64::from_le_bytes(leaf); + let leaf = int_to_node(leaf); + + tree.insert_leaf(key, leaf).unwrap(); + tree.get_leaf_path(key).unwrap(); + } + } +} + +// HELPER FUNCTIONS +// -------------------------------------------------------------------------------------------- + +fn compute_internal_nodes() -> (Word, Word, Word) { + let node2 = Rpo256::hash_elements(&[VALUES4[0], VALUES4[1]].concat()); + let node3 = Rpo256::hash_elements(&[VALUES4[2], VALUES4[3]].concat()); + let root = Rpo256::merge(&[node2, node3]); + + (root.into(), node2.into(), node3.into()) +} + +const fn int_to_node(value: u64) -> Word { + [Felt::new(value), Felt::ZERO, Felt::ZERO, Felt::ZERO] +} From 527455f60095f86bc4847afacf953138aba08052 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Tue, 13 Dec 2022 13:02:57 -0800 Subject: [PATCH 8/8] docs: minor updates to main and benchmark README files --- README.md | 2 ++ benches/README.md | 51 
++++++++++++++++++++++------------------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 1527df0..f50fbc1 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ This crate contains cryptographic primitives used in Polygon Miden. * [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3. * [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs. +For performance benchmarks of these hash functions and their comparison to other popular hash functions please see [here](./benches/). + ## Merkle [Merkle module](./src/merkle/) provides a set of data structures related to Merkle trees. All these data structures are implemented using the RPO hash function described above. The data structures are: diff --git a/benches/README.md b/benches/README.md index 859ad73..059c392 100644 --- a/benches/README.md +++ b/benches/README.md @@ -1,52 +1,49 @@ # Miden VM Hash Functions In the Miden VM, we make use of different hash functions. Some of these are "traditional" hash functions, like `BLAKE3`, which are optimized for out-of-STARK performance, while others are algebraic hash functions, like `Rescue Prime`, and are more optimized for a better performance inside the STARK. In what follows, we benchmark several such hash functions and compare against other constructions that are used by other proving systems. More precisely, we benchmark: -* **Rescue Prime:** -As specified [here](https://eprint.iacr.org/2020/1143) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/rescue/rp64_256/mod.rs). - -* **Rescue Prime Optimized:** -As specified [here](https://eprint.iacr.org/2022/1577) and implemented in this crate. 
- -* **BLAKE3:** -As specified [here](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) and implemented in this crate. - -* **SHA3:** -As specified [here](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/sha/mod.rs). - -* **Poseidon:** -As specified [here](https://eprint.iacr.org/2019/458.pdf) and implemented (in pure Rust, without vectorized instructions) [here](https://github.com/mir-protocol/plonky2/blob/main/plonky2/src/hash/poseidon_goldilocks.rs). +* **BLAKE3** as specified [here](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) and implemented [here](https://github.com/BLAKE3-team/BLAKE3) (with a wrapper exposed via this crate). +* **SHA3** as specified [here](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/sha/mod.rs). +* **Poseidon** as specified [here](https://eprint.iacr.org/2019/458.pdf) and implemented [here](https://github.com/mir-protocol/plonky2/blob/806b88d7d6e69a30dc0b4775f7ba275c45e8b63b/plonky2/src/hash/poseidon_goldilocks.rs) (but in pure Rust, without vectorized instructions). +* **Rescue Prime (RP)** as specified [here](https://eprint.iacr.org/2020/1143) and implemented [here](https://github.com/novifinancial/winterfell/blob/46dce1adf0/crypto/src/hash/rescue/rp64_256/mod.rs). +* **Rescue Prime Optimized (RPO)** as specified [here](https://eprint.iacr.org/2022/1577) and implemented in this crate. ## Comparison and Instructions ### Comparison We benchmark the above hash functions using two scenarios. The first is a 2-to-1 $(a,b)\mapsto h(a,b)$ hashing where both $a$, $b$ and $h(a,b)$ are the digests corresponding to each of the hash functions. -The second scenario is that of sequential hashing where we take a sequence of length $100$ field elements and hash these to produce a single digest. 
The digests are $4$ field elements (i.e. 256-bit) for Poseidon, Rescue Prime and RPO, and an array `[u8;32]` for SHA3 and BLAKE3. +The second scenario is that of sequential hashing where we take a sequence of length $100$ field elements and hash these to produce a single digest. The digests are $4$ field elements in a prime field with modulus $2^{64} - 2^{32} + 1$ (i.e., 32 bytes) for Poseidon, Rescue Prime and RPO, and an array `[u8; 32]` for SHA3 and BLAKE3. #### Scenario 1: 2-to-1 hashing `h(a,b)` -| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | -| ----------------- | ------ | --------| --------- | --------- | ------- | -| Apple M1 Pro | 80 ns | 245 ns | 1.3 us | 9.1 us | 5.4 us | -| Apple M2 | 76 ns | 233 ns | 1.2 us | 7.9 us | 5.0 us | -| AMD Ryzen 9 5950X | 64 ns | 273 ns | 1.2 us | 9.1 us | 5.5 us | +| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | +| ------------------- | ------ | --------| --------- | --------- | ------- | +| Apple M1 Pro | 80 ns | 245 ns | 1.5 us | 9.1 us | 5.4 us | +| Apple M2 | 76 ns | 233 ns | 1.3 us | 7.9 us | 5.0 us | +| Amazon Graviton 3 | 116 ns | | | | 8.8 us | +| AMD Ryzen 9 5950X | 64 ns | 273 ns | 1.2 us | 9.1 us | 5.5 us | +| Intel Core i5-8279U | 80 ns | | | | 8.7 us | +| Intel Xeon 8375C | 67 ns | | | | 8.2 us | #### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])` -| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | -| ----------------- | -------| ------- | --------- | --------- | ------- | -| Apple M1 Pro | 1.1 us | 1.5 us | 17.3 us | 118 us | 70 us | -| Apple M2 | 1.0 us | 1.5 us | 15.5 us | 103 us | 65 us | -| AMD Ryzen 9 5950X | 0.8 us | 1.7 us | 15.7 us | 120 us | 72 us | +| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | +| ------------------- | -------| ------- | --------- | --------- | ------- | +| Apple M1 Pro | 1.1 us | 1.5 us | 19.4 us | 118 us | 70 us | +| Apple M2 | 1.0 us | 1.5 us | 17.4 us | 103 us | 65 us | +| Amazon Graviton 3 | 1.4 us | | | | 
 114 us |
+| AMD Ryzen 9 5950X   | 0.8 us | 1.7 us  | 15.7 us   | 120 us    | 72 us   |
+| Intel Core i5-8279U | 1.0 us |         |           |           | 116 us  |
+| Intel Xeon 8375C    | 0.8 us |         |           |           | 110 us  |
 
 ### Instructions
 
 Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). After that, to run the benchmarks for RPO and BLAKE3, clone the current repository, and from the root directory of the repo run the following:
 
 ```
- cargo bench --bench hash
+ cargo bench hash
 ```
 
 To run the benchmarks for Rescue Prime, Poseidon and SHA3, clone the following [repository](https://github.com/Dominik1999/winterfell.git) as above, then checkout the `hash-functions-benches` branch, and from the root directory of the repo run the following:
 
 ```
-cargo bench --bench hash
+cargo bench hash
 ```
\ No newline at end of file