Mirror of https://github.com/arnaucube/miden-crypto.git (synced 2026-01-12 17:11:28 +01:00)
Compare commits
45 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d2a6739605 | |
| | cae87a2790 | |
| | 335c50f54d | |
| | b151773b0d | |
| | 1867f842d3 | |
| | e1072ecc7f | |
| | 063ad49afd | |
| | a27f9ad828 | |
| | 50dd6bda19 | |
| | ee20a49953 | |
| | 0d75e3593b | |
| | 689cc93ed1 | |
| | 7970d3a736 | |
| | a734dace1e | |
| | 940cc04670 | |
| | e82baa35bb | |
| | 876d1bf97a | |
| | 8adc0ab418 | |
| | c2eb38c236 | |
| | a924ac6b81 | |
| | e214608c85 | |
| | c44ccd9dec | |
| | e34900c7d8 | |
| | 2b184cd4ca | |
| | 913384600d | |
| | ae807a47ae | |
| | f4a9d5b027 | |
| | ee42d87121 | |
| | b1cb2b6ec3 | |
| | e4a9a2ac00 | |
| | c5077b1683 | |
| | 2e74028fd4 | |
| | 8bf6ef890d | |
| | e2aeb25e01 | |
| | 790846cc73 | |
| | 4cb6bed428 | |
| | a12e62ff22 | |
| | 9aa4987858 | |
| | 70a0a1e970 | |
| | 025fbb66a9 | |
| | 5ee5e8554b | |
| | ac3c6976bd | |
| | 374a10f340 | |
| | ad0f472708 | |
| | d92fae7f82 | |
**.config/nextest.toml** (new file, +3)

```toml
[profile.default]
failure-output = "immediate-final"
fail-fast = false
```
**.github/workflows/build.yml** (vendored, new file, +25)

```yaml
# Runs build related jobs.

name: build

on:
  push:
    branches: [main, next]
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  no-std:
    name: Build for no-std
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [stable, nightly]
    steps:
      - uses: actions/checkout@main
      - name: Build for no-std
        run: |
          rustup update --no-self-update ${{ matrix.toolchain }}
          rustup target add wasm32-unknown-unknown
          make build-no-std
```
**.github/workflows/changelog.yml** (vendored, new file, +23)

```yaml
# Runs changelog related jobs.
# CI job heavily inspired by: https://github.com/tarides/changelog-check-action

name: changelog

on:
  pull_request:
    types: [opened, reopened, synchronize, labeled, unlabeled]

jobs:
  changelog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@main
        with:
          fetch-depth: 0
      - name: Check for changes in changelog
        env:
          BASE_REF: ${{ github.event.pull_request.base.ref }}
          NO_CHANGELOG_LABEL: ${{ contains(github.event.pull_request.labels.*.name, 'no changelog') }}
        run: ./scripts/check-changelog.sh "${{ inputs.changelog }}"
        shell: bash
```
**.github/workflows/doc.yml** (vendored, deleted, -31)

```yaml
# Runs documentation related jobs.

name: doc

on:
  push:
    branches:
      - main
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  docs:
    name: Verify the docs on ${{matrix.toolchain}}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [stable]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{matrix.toolchain}}
          override: true
      - uses: davidB/rust-cargo-make@v1
      - name: cargo make - doc
        run: cargo make doc
```
**.github/workflows/lint.yml** (vendored, modified, 81 lines changed; old and new lines appear interleaved as in the flattened diff)

```
@@ -4,63 +4,50 @@ name: lint
on:
  push:
    branches:
      - main
    branches: [main, next]
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  clippy:
    name: clippy nightly on ubuntu-latest
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@main
      - name: Clippy
        run: |
          rustup update --no-self-update nightly
          rustup +nightly component add clippy
          make clippy

  rustfmt:
    name: rustfmt check nightly on ubuntu-latest
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@main
      - name: Rustfmt
        run: |
          rustup update --no-self-update nightly
          rustup +nightly component add rustfmt
          make format-check

  doc:
    name: doc stable on ubuntu-latest
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@main
      - name: Build docs
        run: |
          rustup update --no-self-update
          make doc

  version:
    name: check rust version consistency
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@main
        with:
          profile: minimal
          override: true
      - name: check rust versions
        run: ./scripts/check-rust-version.sh

  rustfmt:
    name: rustfmt ${{matrix.toolchain}} on ${{matrix.os}}
    runs-on: ${{matrix.os}}-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [nightly]
        os: [ubuntu]
    steps:
      - uses: actions/checkout@v4
      - name: Install minimal Rust with rustfmt
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: ${{matrix.toolchain}}
          components: rustfmt
          override: true
      - uses: davidB/rust-cargo-make@v1
      - name: cargo make - format-check
        run: cargo make format-check

  clippy:
    name: clippy ${{matrix.toolchain}} on ${{matrix.os}}
    runs-on: ${{matrix.os}}-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [stable]
        os: [ubuntu]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install minimal Rust with clippy
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: ${{matrix.toolchain}}
          components: clippy
          override: true
      - uses: davidB/rust-cargo-make@v1
      - name: cargo make - clippy
        run: cargo make clippy
```
**.github/workflows/no-std.yml** (vendored, deleted, -32)

```yaml
# Runs no-std related jobs.

name: no-std

on:
  push:
    branches:
      - main
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  no-std:
    name: build ${{matrix.toolchain}} no-std for wasm32-unknown-unknown
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [stable, nightly]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{matrix.toolchain}}
          override: true
      - run: rustup target add wasm32-unknown-unknown
      - uses: davidB/rust-cargo-make@v1
      - name: cargo make - build-no-std
        run: cargo make build-no-std
```
**.github/workflows/test.yml** (vendored, modified, 26 lines changed; old and new lines appear interleaved as in the flattened diff)

```
@@ -1,34 +1,28 @@
# Runs testing related jobs
# Runs test related jobs.

name: test

on:
  push:
    branches:
      - main
    branches: [main, next]
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  test:
    name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.features}}
    name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.args}}
    runs-on: ${{matrix.os}}-latest
    strategy:
      fail-fast: false
      matrix:
        toolchain: [stable, nightly]
        os: [ubuntu]
        features: ["test", "test-no-default-features"]
        args: [default, no-std]
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{matrix.toolchain}}
          override: true
      - uses: davidB/rust-cargo-make@v1
      - name: cargo make - test
        run: cargo make ${{matrix.features}}
      - uses: actions/checkout@main
      - uses: taiki-e/install-action@nextest
      - name: Perform tests
        run: |
          rustup update --no-self-update ${{matrix.toolchain}}
          make test-${{matrix.args}}
```
**.pre-commit-config.yaml** (modified; filename inferred from the hook content, old and new lines interleaved as in the flattened diff)

```
@@ -1,8 +1,8 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
@@ -15,29 +15,20 @@ repos:
      - id: check-executables-have-shebangs
      - id: check-merge-conflict
      - id: detect-private-key
  - repo: https://github.com/hackaugusto/pre-commit-cargo
    rev: v1.0.0
  - repo: local
    hooks:
      # Allows cargo fmt to modify the source code prior to the commit
      - id: cargo
        name: Cargo fmt
        args: ["+stable", "fmt", "--all"]
      - id: lint
        name: Make lint
        stages: [commit]
      # Requires code to be properly formatted prior to pushing upstream
      - id: cargo
        name: Cargo fmt --check
        args: ["+stable", "fmt", "--all", "--check"]
        stages: [push, manual]
      - id: cargo
        name: Cargo check --all-targets
        args: ["+stable", "check", "--all-targets"]
      - id: cargo
        name: Cargo check --all-targets --no-default-features
        args: ["+stable", "check", "--all-targets", "--no-default-features"]
      - id: cargo
        name: Cargo check --all-targets --features default,std,serde
        args: ["+stable", "check", "--all-targets", "--features", "default,std,serde"]
      # Unlike fmt, clippy will not be automatically applied
      - id: cargo
        name: Cargo clippy
        args: ["+nightly", "clippy", "--workspace", "--", "--deny", "clippy::all", "--deny", "warnings"]
        language: rust
        entry: make lint
      - id: doc
        name: Make doc
        stages: [commit]
        language: rust
        entry: make doc
      - id: check
        name: Make check
        stages: [commit]
        language: rust
        entry: make check
```
**CHANGELOG.md** (modified, 116 lines changed; the `*`-bullet lines are the old entries, the `-`-bullet duplicates are the restyled new entries)

```
@@ -1,84 +1,120 @@
## 0.13.0 (2024-11-24)

- Fixed a bug in the implementation of `draw_integers` for `RpoRandomCoin` (#343).
- [BREAKING] Refactor error messages and use `thiserror` to derive errors (#344).
- [BREAKING] Updated Winterfell dependency to v0.11 (#346).
- Added RPO-STARK based DSA (#349).
- Added benchmarks for DSA implementations (#354).
- Implemented deterministic RPO-STARK based DSA (#358).

## 0.12.0 (2024-10-30)

- [BREAKING] Updated Winterfell dependency to v0.10 (#338).
- Added parallel implementation of `Smt::with_entries()` with significantly better performance when the `concurrent` feature is enabled (#341).

## 0.11.0 (2024-10-17)

- [BREAKING]: renamed `Mmr::open()` into `Mmr::open_at()` and `Mmr::peaks()` into `Mmr::peaks_at()` (#234).
- Added `Mmr::open()` and `Mmr::peaks()` which rely on `Mmr::open_at()` and `Mmr::peaks()` respectively (#234).
- Standardized CI and Makefile across Miden repos (#323).
- Added `Smt::compute_mutations()` and `Smt::apply_mutations()` for validation-checked insertions (#327).
- Changed padding rule for RPO/RPX hash functions (#318).
- [BREAKING] Changed return value of the `Mmr::verify()` and `MerklePath::verify()` from `bool` to `Result<>` (#335).
- Added `is_empty()` functions to the `SimpleSmt` and `Smt` structures. Added `EMPTY_ROOT` constant to the `SparseMerkleTree` trait (#337).

## 0.10.3 (2024-09-25)

- Implement `get_size_hint` for `Smt` (#331).

## 0.10.2 (2024-09-25)

- Implement `get_size_hint` for `RpoDigest` and `RpxDigest` and expose constants for their serialized size (#330).

## 0.10.1 (2024-09-13)

- Added `Serializable` and `Deserializable` implementations for `PartialMmr` and `InOrderIndex` (#329).

## 0.10.0 (2024-08-06)

* Added more `RpoDigest` and `RpxDigest` conversions (#311).
* [BREAKING] Migrated to Winterfell v0.9 (#315).
* Fixed encoding of Falcon secret key (#319).
- Added more `RpoDigest` and `RpxDigest` conversions (#311).
- [BREAKING] Migrated to Winterfell v0.9 (#315).
- Fixed encoding of Falcon secret key (#319).

## 0.9.3 (2024-04-24)

* Added `RpxRandomCoin` struct (#307).
- Added `RpxRandomCoin` struct (#307).

## 0.9.2 (2024-04-21)

* Implemented serialization for the `Smt` struct (#304).
* Fixed a bug in Falcon signature generation (#305).
- Implemented serialization for the `Smt` struct (#304).
- Fixed a bug in Falcon signature generation (#305).

## 0.9.1 (2024-04-02)

* Added `num_leaves()` method to `SimpleSmt` (#302).
- Added `num_leaves()` method to `SimpleSmt` (#302).

## 0.9.0 (2024-03-24)

* [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
* [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
* [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).
- [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
- [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
- [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).

# 0.8.4 (2024-03-17)

* Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).
- Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).

# 0.8.3 (2024-03-17)

* Re-added unintentionally removed re-exported liballoc macros (#292).
- Re-added unintentionally removed re-exported liballoc macros (#292).

# 0.8.2 (2024-03-17)

* Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).
- Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).

## 0.8.1 (2024-02-21)

* Fixed clippy warnings (#280)
- Fixed clippy warnings (#280)

## 0.8.0 (2024-02-14)

* Implemented the `PartialMmr` data structure (#195).
* Implemented RPX hash function (#201).
* Added `FeltRng` and `RpoRandomCoin` (#237).
* Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
* Added `inner_nodes()` method to `PartialMmr` (#238).
* Improved `PartialMmr::apply_delta()` (#242).
* Refactored `SimpleSmt` struct (#245).
* Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
* Updated Winterfell dependency to v0.8 (#275).
- Implemented the `PartialMmr` data structure (#195).
- Implemented RPX hash function (#201).
- Added `FeltRng` and `RpoRandomCoin` (#237).
- Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
- Added `inner_nodes()` method to `PartialMmr` (#238).
- Improved `PartialMmr::apply_delta()` (#242).
- Refactored `SimpleSmt` struct (#245).
- Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
- Updated Winterfell dependency to v0.8 (#275).

## 0.7.1 (2023-10-10)

* Fixed RPO Falcon signature build on Windows.
- Fixed RPO Falcon signature build on Windows.

## 0.7.0 (2023-10-05)

* Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
* Implemented clearing of nodes in `TieredSmt` (#173).
* Added ability to generate inclusion proofs for `TieredSmt` (#174).
* Implemented Falcon DSA (#179).
* Added conditional `serde`` support for various structs (#180).
* Implemented benchmarking for `TieredSmt` (#182).
* Added more leaf traversal methods for `MerkleStore` (#185).
* Added SVE acceleration for RPO hash function (#189).
- Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
- Implemented clearing of nodes in `TieredSmt` (#173).
- Added ability to generate inclusion proofs for `TieredSmt` (#174).
- Implemented Falcon DSA (#179).
- Added conditional `serde`` support for various structs (#180).
- Implemented benchmarking for `TieredSmt` (#182).
- Added more leaf traversal methods for `MerkleStore` (#185).
- Added SVE acceleration for RPO hash function (#189).

## 0.6.0 (2023-06-25)

* [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
* [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
* Added initial implementation of `PartialMerkleTree` (#156).
- [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
- [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
- Added initial implementation of `PartialMerkleTree` (#156).

## 0.5.0 (2023-05-26)

* Implemented `TieredSmt` (#152, #153).
* Implemented ability to extract a subset of a `MerkleStore` (#151).
* Cleaned up `SimpleSmt` interface (#149).
* Decoupled hashing and padding of peaks in `Mmr` (#148).
* Added `inner_nodes()` to `MerkleStore` (#146).
- Implemented `TieredSmt` (#152, #153).
- Implemented ability to extract a subset of a `MerkleStore` (#151).
- Cleaned up `SimpleSmt` interface (#149).
- Decoupled hashing and padding of peaks in `Mmr` (#148).
- Added `inner_nodes()` to `MerkleStore` (#146).

## 0.4.0 (2023-04-21)
```
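The 0.11.0 entry above introduces `Smt::compute_mutations()` and `Smt::apply_mutations()` for validation-checked insertions (#327). A minimal sketch of the compute-then-apply pattern follows; the exact type paths, `MutationSet::root()`, and the signatures are assumptions based on the changelog entries, not code taken from this diff:

```rust
use miden_crypto::hash::rpo::RpoDigest;
use miden_crypto::merkle::Smt;
use miden_crypto::{Felt, Word};

fn main() {
    // An empty sparse Merkle tree.
    let mut smt = Smt::new();

    // Stage a batch of key/value writes without mutating the tree yet.
    let key = RpoDigest::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]);
    let value: Word = [Felt::new(42), Felt::new(0), Felt::new(0), Felt::new(0)];
    let mutations = smt.compute_mutations(vec![(key, value)]);

    // The post-mutation root is known before anything is written...
    let expected_root = mutations.root();

    // ...and applying the staged set moves the tree to exactly that root.
    smt.apply_mutations(mutations).expect("mutation set was computed against this tree");
    assert_eq!(smt.root(), expected_root);
}
```

Staging mutations first lets a caller validate the resulting root (for example, against an externally agreed value) before committing any writes.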
**Cargo.lock** (generated, 360 lines changed; for modified packages the old version/checksum line is immediately followed by the new one, as in the flattened diff)

```
@@ -19,9 +19,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"

[[package]]
name = "anstream"
version = "0.6.15"
version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
dependencies = [
 "anstyle",
 "anstyle-parse",

@@ -34,55 +34,61 @@ dependencies = [

[[package]]
name = "anstyle"
version = "1.0.8"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"

[[package]]
name = "anstyle-parse"
version = "0.2.5"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.1.1"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
dependencies = [
 "windows-sys 0.52.0",
 "windows-sys 0.59.0",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.4"
version = "3.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
dependencies = [
 "anstyle",
 "windows-sys 0.52.0",
 "windows-sys 0.59.0",
]

[[package]]
name = "arrayref"
version = "0.3.8"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a"
checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"

[[package]]
name = "arrayvec"
version = "0.7.4"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"

[[package]]
name = "assert_matches"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"

[[package]]
name = "autocfg"
version = "1.3.0"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"

[[package]]
name = "bit-set"

@@ -107,9 +113,9 @@ checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"

[[package]]
name = "blake3"
version = "1.5.3"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210"
checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e"
dependencies = [
 "arrayref",
 "arrayvec",

@@ -147,12 +153,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "cc"
version = "1.1.7"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc"
checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d"
dependencies = [
 "jobserver",
 "libc",
 "shlex",
]

[[package]]

@@ -190,9 +197,9 @@ dependencies = [

[[package]]
name = "clap"
version = "4.5.13"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc"
checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84"
dependencies = [
 "clap_builder",
 "clap_derive",

@@ -200,9 +207,9 @@ dependencies = [

[[package]]
name = "clap_builder"
version = "4.5.13"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99"
checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838"
dependencies = [
 "anstream",
 "anstyle",

@@ -212,9 +219,9 @@ dependencies = [

[[package]]
name = "clap_derive"
version = "4.5.13"
version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
dependencies = [
 "heck",
 "proc-macro2",

@@ -224,27 +231,27 @@ dependencies = [

[[package]]
name = "clap_lex"
version = "0.7.2"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"

[[package]]
name = "colorchoice"
version = "1.0.2"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0"
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"

[[package]]
name = "constant_time_eq"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"

[[package]]
name = "cpufeatures"
version = "0.2.12"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
dependencies = [
 "libc",
]

@@ -344,19 +351,19 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"

[[package]]
name = "errno"
version = "0.3.9"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
 "libc",
 "windows-sys 0.52.0",
 "windows-sys 0.59.0",
]

[[package]]
name = "fastrand"
version = "2.1.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"

[[package]]
name = "fnv"

@@ -411,9 +418,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

[[package]]
name = "hermit-abi"
version = "0.3.9"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"

[[package]]
name = "hex"

@@ -423,9 +430,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "is-terminal"
version = "0.4.12"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
dependencies = [
 "hermit-abi",
 "libc",

@@ -449,9 +456,9 @@ dependencies = [

[[package]]
name = "itoa"
version = "1.0.11"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"

[[package]]
name = "jobserver"

@@ -464,10 +471,11 @@ dependencies = [

[[package]]
name = "js-sys"
version = "0.3.69"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
dependencies = [
 "once_cell",
 "wasm-bindgen",
]

@@ -488,15 +496,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"

[[package]]
name = "libc"
version = "0.2.155"
version = "0.2.168"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"

[[package]]
name = "libm"
version = "0.2.8"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"

[[package]]
name = "linux-raw-sys"

@@ -518,8 +526,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"

[[package]]
name = "miden-crypto"
version = "0.10.0"
version = "0.14.0"
dependencies = [
 "assert_matches",
 "blake3",
 "cc",
 "clap",

@@ -533,13 +542,18 @@ dependencies = [
 "rand",
 "rand_chacha",
 "rand_core",
 "rayon",
 "seq-macro",
 "serde",
 "sha3",
 "thiserror",
 "winter-air",
 "winter-crypto",
 "winter-math",
 "winter-prover",
 "winter-rand-utils",
 "winter-utils",
 "winter-verifier",
]

[[package]]

@@ -618,9 +632,9 @@ dependencies = [

[[package]]
name = "once_cell"
version = "1.19.0"
version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"

[[package]]
name = "oorandom"

@@ -629,10 +643,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"

[[package]]
name = "plotters"
version = "0.3.6"
name = "pin-project-lite"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"

[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
 "num-traits",
 "plotters-backend",

@@ -643,15 +663,15 @@ dependencies = [

[[package]]
name = "plotters-backend"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"

[[package]]
name = "plotters-svg"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
 "plotters-backend",
]

@@ -667,9 +687,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
version = "1.0.86"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
 "unicode-ident",
]

@@ -702,9 +722,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"

[[package]]
name = "quote"
version = "1.0.36"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
 "proc-macro2",
]

@@ -770,9 +790,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.10.6"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -782,9 +802,9 @@ dependencies = [

[[package]]
name = "regex-automata"
version = "0.4.7"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -793,21 +813,21 @@ dependencies = [

[[package]]
name = "regex-syntax"
version = "0.8.4"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

[[package]]
name = "rustix"
version = "0.38.34"
version = "0.38.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85"
dependencies = [
 "bitflags",
 "errno",
 "libc",
 "linux-raw-sys",
 "windows-sys 0.52.0",
 "windows-sys 0.59.0",
]

[[package]]

@@ -845,18 +865,18 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"

[[package]]
name = "serde"
version = "1.0.204"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.204"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
dependencies = [
 "proc-macro2",
 "quote",

@@ -865,9 +885,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.122"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
 "itoa",
 "memchr",

@@ -885,6 +905,12 @@ dependencies = [
 "keccak",
]

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "strsim"
version = "0.11.1"

@@ -893,9 +919,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

[[package]]
name = "syn"
version = "2.0.72"
version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
 "proc-macro2",
 "quote",

@@ -904,9 +930,9 @@ dependencies = [

[[package]]
name = "tempfile"
version = "3.12.0"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
dependencies = [
 "cfg-if",
 "fastrand",

@@ -915,6 +941,26 @@ dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "thiserror"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
dependencies = [
 "thiserror-impl",
]

[[package]]
name = "thiserror-impl"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tinytemplate"
version = "1.2.1"

@@ -925,6 +971,34 @@ dependencies = [
 "serde_json",
]

[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
 "pin-project-lite",
 "tracing-attributes",
 "tracing-core",
]

[[package]]
name = "tracing-attributes"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tracing-core"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"

[[package]]
name = "typenum"
version = "1.17.0"

@@ -939,9 +1013,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"

[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"

[[package]]
name = "utf8parse"

@@ -982,23 +1056,23 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "wasm-bindgen"
version = "0.2.92"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
dependencies = [
 "cfg-if",
 "once_cell",
 "wasm-bindgen-macro",
]

[[package]]
name = "wasm-bindgen-backend"
version = "0.2.92"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
dependencies = [
 "bumpalo",
 "log",
 "once_cell",
 "proc-macro2",
 "quote",
 "syn",

@@ -1007,9 +1081,9 @@ dependencies = [

[[package]]
name = "wasm-bindgen-macro"
version = "0.2.92"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
dependencies = [
 "quote",
 "wasm-bindgen-macro-support",

@@ -1017,9 +1091,9 @@ dependencies = [

[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.92"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
dependencies = [
 "proc-macro2",
 "quote",

@@ -1030,15 +1104,15 @@ dependencies = [

[[package]]
name = "wasm-bindgen-shared"
version = "0.2.92"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"

[[package]]
name = "web-sys"
version = "0.3.69"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef"
checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc"
dependencies = [
 "js-sys",
 "wasm-bindgen",

@@ -1135,33 +1209,82 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

[[package]]
name = "winter-air"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "libm",
 "winter-crypto",
 "winter-fri",
 "winter-math",
 "winter-utils",
]

[[package]]
name = "winter-crypto"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00fbb724d2d9fbfd3aa16ea27f5e461d4fe1d74b0c9e0ed1bf79e9e2a955f4d5"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "blake3",
 "rand",
 "rand_chacha",
 "sha3",
 "winter-math",
 "winter-utils",
]

[[package]]
name = "winter-fri"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "rand",
 "rand_chacha",
 "winter-crypto",
 "winter-math",
 "winter-utils",
]

[[package]]
name = "winter-math"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "004f85bb051ce986ec0b9a2bd90aaf81b83e3c67464becfdf7db31f14c1019ba"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "serde",
 "winter-utils",
]

[[package]]
name = "winter-maybe-async"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "quote",
 "syn",
]

[[package]]
name = "winter-prover"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "rand",
 "rand_chacha",
 "tracing",
 "winter-air",
 "winter-crypto",
 "winter-fri",
 "winter-math",
 "winter-maybe-async",
 "winter-rand-utils",
 "winter-utils",
]

[[package]]
name = "winter-rand-utils"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b827c901ab0c316d89812858ff451d60855c0a5c7ae734b098c62a28624181"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "rand",
 "winter-utils",

@@ -1169,9 +1292,20 @@ dependencies = [

[[package]]
name = "winter-utils"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0568612a95bcae3c94fb14da2686f8279ca77723dbdf1e97cf3673798faf6485"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"

[[package]]
name = "winter-verifier"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
 "winter-air",
 "winter-crypto",
 "winter-fri",
 "winter-math",
 "winter-utils",
]

[[package]]
name = "zerocopy"
```
**Cargo.toml** (modified, 51 lines changed; old and new lines appear interleaved as in the flattened diff)

```
@@ -1,16 +1,16 @@
[package]
name = "miden-crypto"
version = "0.10.0"
version = "0.14.0"
description = "Miden Cryptographic primitives"
authors = ["miden contributors"]
readme = "README.md"
license = "MIT"
repository = "https://github.com/0xPolygonMiden/crypto"
documentation = "https://docs.rs/miden-crypto/0.10.0"
documentation = "https://docs.rs/miden-crypto/0.14.0"
categories = ["cryptography", "no-std"]
keywords = ["miden", "crypto", "hash", "merkle"]
edition = "2021"
rust-version = "1.80"
rust-version = "1.82"

[[bin]]
name = "miden-crypto"
@@ -19,6 +19,10 @@ bench = false
doctest = false
required-features = ["executable"]

[[bench]]
name = "dsa"
harness = false

[[bench]]
name = "hash"
harness = false
@@ -27,13 +31,28 @@ harness = false
name = "smt"
harness = false

[[bench]]
name = "smt-subtree"
harness = false
required-features = ["internal"]

[[bench]]
name = "merkle"
harness = false

[[bench]]
name = "smt-with-entries"
harness = false

[[bench]]
name = "store"
harness = false

[features]
default = ["std"]
concurrent = ["dep:rayon"]
default = ["std", "concurrent"]
executable = ["dep:clap", "dep:rand-utils", "std"]
internal = []
serde = ["dep:serde", "serde?/alloc", "winter-math/serde"]
std = [
 "blake3/std",
@@ -48,26 +67,32 @@ std = [
[dependencies]
blake3 = { version = "1.5", default-features = false }
clap = { version = "4.5", optional = true, features = ["derive"] }
getrandom = { version = "0.2", features = ["js"] }
num = { version = "0.4", default-features = false, features = ["alloc", "libm"] }
num-complex = { version = "0.4", default-features = false }
rand = { version = "0.8", default-features = false }
rand_chacha = { version = "0.3", default-features = false }
rand_core = { version = "0.6", default-features = false }
rand-utils = { version = "0.9", package = "winter-rand-utils", optional = true }
rand-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', package = "winter-rand-utils" , branch = 'al-zk', optional = true }
rayon = { version = "1.10", optional = true }
serde = { version = "1.0", default-features = false, optional = true, features = ["derive"] }
sha3 = { version = "0.10", default-features = false }
winter-crypto = { version = "0.9", default-features = false }
winter-math = { version = "0.9", default-features = false }
winter-utils = { version = "0.9", default-features = false }
thiserror = { version = "2.0", default-features = false }
winter-air = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-crypto = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-prover = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-verifier = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-math = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }

[dev-dependencies]
assert_matches = { version = "1.5", default-features = false }
criterion = { version = "0.5", features = ["html_reports"] }
getrandom = { version = "0.2", features = ["js"] }
hex = { version = "0.4", default-features = false, features = ["alloc"] }
proptest = "1.4"
rand_chacha = { version = "0.3", default-features = false }
rand-utils = { version = "0.9", package = "winter-rand-utils" }
proptest = "1.5"
rand-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', package = "winter-rand-utils" , branch = 'al-zk' }
seq-macro = { version = "0.3" }

[build-dependencies]
cc = { version = "1.0", optional = true, features = ["parallel"] }
cc = { version = "1.2", optional = true, features = ["parallel"] }
glob = "0.3"
```
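The features table now defines `concurrent = ["dep:rayon"]` and adds it to the defaults. A generic sketch of how such an optional-dependency feature is typically consumed in code follows; the function is hypothetical, for illustration only, and is not taken from this repository:

```rust
// Hypothetical consumer of a `concurrent = ["dep:rayon"]` feature wiring;
// illustrative only, not code from miden-crypto.

/// Parallel implementation, compiled only when `concurrent` is enabled.
#[cfg(feature = "concurrent")]
pub fn sum_of_squares(xs: &[u64]) -> u64 {
    use rayon::prelude::*;
    xs.par_iter().map(|x| x * x).sum()
}

/// Sequential fallback used when the feature is disabled (e.g. no-std builds).
#[cfg(not(feature = "concurrent"))]
pub fn sum_of_squares(xs: &[u64]) -> u64 {
    xs.iter().map(|x| x * x).sum()
}
```

Because `rayon` is declared `optional = true`, it is only pulled into the build when the `concurrent` feature is on; consumers wanting single-threaded builds can opt out with `--no-default-features --features std`.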
**Makefile** (new file, +86; recipe indentation restored)

```make
.DEFAULT_GOAL := help

.PHONY: help
help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

# -- variables --------------------------------------------------------------------------------------

WARNINGS=RUSTDOCFLAGS="-D warnings"
DEBUG_OVERFLOW_INFO=RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2"

# -- linting --------------------------------------------------------------------------------------

.PHONY: clippy
clippy: ## Run Clippy with configs
	$(WARNINGS) cargo +nightly clippy --workspace --all-targets --all-features

.PHONY: fix
fix: ## Run Fix with configs
	cargo +nightly fix --allow-staged --allow-dirty --all-targets --all-features

.PHONY: format
format: ## Run Format using nightly toolchain
	cargo +nightly fmt --all

.PHONY: format-check
format-check: ## Run Format using nightly toolchain but only in check mode
	cargo +nightly fmt --all --check

.PHONY: lint
lint: format fix clippy ## Run all linting tasks at once (Clippy, fixing, formatting)

# --- docs ----------------------------------------------------------------------------------------

.PHONY: doc
doc: ## Generate and check documentation
	$(WARNINGS) cargo doc --all-features --keep-going --release

# --- testing -------------------------------------------------------------------------------------

.PHONY: test-default
test-default: ## Run tests with default features
	$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --all-features

.PHONY: test-no-std
test-no-std: ## Run tests with `no-default-features` (std)
	$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --no-default-features

.PHONY: test
test: test-default test-no-std ## Run all tests

# --- checking ------------------------------------------------------------------------------------

.PHONY: check
check: ## Check all targets and features for errors without code generation
	cargo check --all-targets --all-features

# --- building ------------------------------------------------------------------------------------

.PHONY: build
build: ## Build with default features enabled
	cargo build --release

.PHONY: build-no-std
build-no-std: ## Build without the standard library
	cargo build --release --no-default-features --target wasm32-unknown-unknown

.PHONY: build-avx2
build-avx2: ## Build with avx2 support
	RUSTFLAGS="-C target-feature=+avx2" cargo build --release

.PHONY: build-sve
build-sve: ## Build with sve support
	RUSTFLAGS="-C target-feature=+sve" cargo build --release

# --- benchmarking --------------------------------------------------------------------------------

.PHONY: bench-tx
bench-tx: ## Run crypto benchmarks
	cargo bench --features="concurrent"
```
**Makefile.toml** (deleted, -86; filename inferred from the cargo-make task syntax)

```toml
# Cargo Makefile

# -- linting --------------------------------------------------------------------------------------
[tasks.format]
toolchain = "nightly"
command = "cargo"
args = ["fmt", "--all"]

[tasks.format-check]
toolchain = "nightly"
command = "cargo"
args = ["fmt", "--all", "--", "--check"]

[tasks.clippy-default]
command = "cargo"
args = ["clippy","--workspace", "--all-targets", "--", "-D", "clippy::all", "-D", "warnings"]

[tasks.clippy-all-features]
command = "cargo"
args = ["clippy","--workspace", "--all-targets", "--all-features", "--", "-D", "clippy::all", "-D", "warnings"]

[tasks.clippy]
dependencies = [
  "clippy-default",
  "clippy-all-features"
]

[tasks.fix]
description = "Runs Fix"
command = "cargo"
toolchain = "nightly"
args = ["fix", "--allow-staged", "--allow-dirty", "--all-targets", "--all-features"]

[tasks.lint]
description = "Runs all linting tasks (Clippy, fixing, formatting)"
run_task = { name = ["format", "format-check", "clippy", "docs"] }

# --- docs ----------------------------------------------------------------------------------------
[tasks.doc]
env = { "RUSTDOCFLAGS" = "-D warnings" }
command = "cargo"
args = ["doc", "--all-features", "--keep-going", "--release"]

# --- testing -------------------------------------------------------------------------------------
[tasks.test]
description = "Run tests with default features"
env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
workspace = false
command = "cargo"
args = ["test", "--release"]

[tasks.test-no-default-features]
description = "Run tests with no-default-features"
env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
workspace = false
command = "cargo"
args = ["test", "--release", "--no-default-features"]

[tasks.test-all]
description = "Run all tests"
workspace = false
run_task = { name = ["test", "test-no-default-features"], parallel = true }

# --- building ------------------------------------------------------------------------------------
[tasks.build]
description = "Build in release mode"
command = "cargo"
args = ["build", "--release"]

[tasks.build-no-std]
description = "Build using no-std"
command = "cargo"
args = ["build", "--release", "--no-default-features", "--target", "wasm32-unknown-unknown"]

[tasks.build-avx2]
description = "Build using AVX2 acceleration"
env = { "RUSTFLAGS" = "-C target-feature=+avx2" }
command = "cargo"
args = ["build", "--release"]

[tasks.build-sve]
description = "Build with SVE acceleration"
env = { "RUSTFLAGS" = "-C target-feature=+sve" }
command = "cargo"
args = ["build", "--release"]
```
75
README.md
@@ -2,85 +2,108 @@

[LICENSE](https://github.com/0xPolygonMiden/crypto/blob/main/LICENSE)
[test](https://github.com/0xPolygonMiden/crypto/actions/workflows/test.yml)
[no-std](https://github.com/0xPolygonMiden/crypto/actions/workflows/no-std.yml)
[build](https://github.com/0xPolygonMiden/crypto/actions/workflows/build.yml)
[RUST_VERSION](https://www.rust-lang.org/tools/install)
[CRATE](https://crates.io/crates/miden-crypto)

This crate contains cryptographic primitives used in Polygon Miden.

## Hash

[Hash module](./src/hash) provides a set of cryptographic hash functions which are used by the Miden VM and the Miden rollup. Currently, these functions are:

* [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
* [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
* [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster than RPO.
- [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
- [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
- [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster than RPO.

For performance benchmarks of these hash functions and their comparison to other popular hash functions please see [here](./benches/).
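As a quick orientation, here is a minimal sketch of hashing field elements with RPO256. It uses the same `Rpo256::hash_elements` and `Felt::new` calls that appear in this crate's benchmarks; exact import paths may differ between versions.

```rust
// Minimal sketch: hashing field elements with RPO256.
use miden_crypto::{hash::rpo::Rpo256, Felt};

fn main() {
    // Eight arbitrary field elements to hash.
    let elements: Vec<Felt> = (0..8u64).map(Felt::new).collect();

    // Produces a 256-bit digest (four field elements).
    let digest = Rpo256::hash_elements(&elements);
    println!("{digest:?}");
}
```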
## Merkle

[Merkle module](./src/merkle/) provides a set of data structures related to Merkle trees. All these data structures are implemented using the RPO hash function described above. The data structures are:

* `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
* `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
* `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
* `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
* `PartialMmr`: a partial view of a Merkle mountain range structure.
* `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
* `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.
- `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
- `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
- `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
- `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
- `PartialMmr`: a partial view of a Merkle mountain range structure.
- `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
- `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.

The module also contains additional supporting components such as `NodeIndex`, `MerklePath`, and `MerkleError` to assist with tree indexation, opening proofs, and reporting inconsistent arguments/state.
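A minimal sketch of building an `Smt` from key-value entries, mirroring the calls used in `benches/smt-with-entries.rs`; the `root()` accessor is an assumption and may be named differently in a given version.

```rust
// Minimal sketch: constructing a sparse Merkle tree from key-value pairs.
use miden_crypto::{hash::rpo::RpoDigest, merkle::Smt, Felt, Word, ONE};

fn main() {
    // Four entries; keys are 4-element digests, values are 4-element words.
    let entries: Vec<(RpoDigest, Word)> = (0..4u64)
        .map(|i| {
            let key = RpoDigest::new([ONE, ONE, ONE, Felt::new(i)]);
            let value: Word = [Felt::new(i), ONE, ONE, ONE];
            (key, value)
        })
        .collect();

    // With the `concurrent` feature enabled, construction is multi-threaded.
    let smt = Smt::with_entries(entries).unwrap();
    println!("root: {:?}", smt.root()); // assumed accessor for the Merkle root
}
```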
## Signatures

[DSA module](./src/dsa) provides a set of digital signature schemes supported by default in the Miden VM. Currently, these schemes are:

* `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using the SHAKE256 hash function in the *hash-to-point* algorithm we use RPO256. This makes the signature more efficient to verify in the Miden VM.
- `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using the SHAKE256 hash function in the _hash-to-point_ algorithm we use RPO256. This makes the signature more efficient to verify in the Miden VM.

For the above signatures, key generation, signing, and signature verification are available for both `std` and `no_std` contexts (see [crate features](#crate-features) below). However, in the `no_std` context, the user is responsible for supplying the key generation and signing procedures with a random number generator.
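A minimal signing round-trip under `std`, following the calls used in `benches/dsa.rs`:

```rust
// Minimal sketch: RPO Falcon512 key generation, signing, and verification.
// Under `std`, `SecretKey::new` draws its own randomness; in `no_std`
// contexts, supply an RNG as described in the note above.
use miden_crypto::{dsa::rpo_falcon512::SecretKey, Felt, Word, ONE};

fn main() {
    let sk = SecretKey::new();
    let pk = sk.public_key();

    // The message is a Word, i.e., 4 field elements.
    let message: Word = [ONE, ONE, ONE, Felt::new(42)];

    let signature = sk.sign(message);
    assert!(pk.verify(message, &signature));
}
```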
## Pseudo-Random Element Generator

[Pseudo random element generator module](./src/rand/) provides a set of traits and data structures that facilitate generating pseudo-random elements in the context of the Miden VM and the Miden rollup. The module currently includes:

* `FeltRng`: a trait for generating random field elements and random 4-element words.
* `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using the RPO hash function.
* `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using the RPX hash function.
- `FeltRng`: a trait for generating random field elements and random 4-element words.
- `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using the RPO hash function.
- `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using the RPX hash function.
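A hypothetical sketch of the `FeltRng` workflow described above; the constructor and method names (`RpoRandomCoin::new`, `draw_element`, `draw_word`) are assumptions based on the trait description and may differ from the actual API.

```rust
// Hypothetical sketch: drawing pseudo-random field elements via FeltRng.
use miden_crypto::{
    rand::{FeltRng, RpoRandomCoin},
    Felt, Word, ZERO,
};

fn main() {
    // Seed the coin with a Word (4 field elements); assumed constructor.
    let mut rng = RpoRandomCoin::new([ZERO; 4]);

    let element: Felt = rng.draw_element(); // one random field element
    let word: Word = rng.draw_word(); // four random field elements
    println!("{element:?} {word:?}");
}
```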
## Make commands

We use `make` to automate building, testing, and other processes. In most cases, `make` commands are wrappers around `cargo` commands with specific arguments. You can view the list of available commands in the [Makefile](Makefile), or run the following command:

```shell
make
```

## Crate features

This crate can be compiled with the following features:

* `std` - enabled by default and relies on the Rust standard library.
* `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.
- `concurrent` - enabled by default; enables a multi-threaded implementation of `Smt::with_entries()`, which significantly improves performance on multi-core CPUs.
- `std` - enabled by default and relies on the Rust standard library.
- `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.

Both of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.
All of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.

To compile with `no_std`, disable default features via the `--no-default-features` flag.
To compile with `no_std`, disable default features via the `--no-default-features` flag or use the following command:

```shell
make build-no-std
```

### AVX2 acceleration

On platforms with [AVX2](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) support, the RPO and RPX hash functions can be accelerated by using the vector processing unit. To enable AVX2 acceleration, the code needs to be compiled with the `avx2` target feature enabled. For example:

```shell
cargo make build-avx2
make build-avx2
```

### SVE acceleration

On platforms with [SVE](https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)) support, the RPO and RPX hash functions can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:

On platforms with [SVE](<https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)>) support, the RPO and RPX hash functions can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:

```shell
cargo make build-sve
make build-sve
```

## Testing

The best way to test the library is using our `Makefile.toml` and [cargo-make](https://github.com/sagiegurari/cargo-make); this will enable you to use our pre-defined optimized testing commands:
The best way to test the library is using our [Makefile](Makefile); this will enable you to use our pre-defined optimized testing commands:

```shell
cargo make test-all
make test
```

Some of the functions are computationally heavy, so the tests might take a while to complete when simply using `cargo test`. In order to test in release and optimized mode, we have to replicate the test conditions of the development mode so all debug assertions can be verified.

We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile.toml](Makefile.toml)):
We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile](Makefile)):

```shell
RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2" cargo test --release
```

## License

This project is [MIT licensed](./LICENSE).
@@ -1,4 +1,6 @@
# Miden VM Hash Functions
# Benchmarks

## Miden VM Hash Functions
In the Miden VM, we make use of different hash functions. Some of these are "traditional" hash functions, like `BLAKE3`, which are optimized for out-of-STARK performance, while others are algebraic hash functions, like `Rescue Prime`, which are optimized for performance inside the STARK. In what follows, we benchmark several such hash functions and compare them against constructions used by other proving systems. More precisely, we benchmark:

* **BLAKE3** as specified [here](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) and implemented [here](https://github.com/BLAKE3-team/BLAKE3) (with a wrapper exposed via this crate).
@@ -8,13 +10,13 @@ In the Miden VM, we make use of different hash functions. Some of these are "tra
* **Rescue Prime Optimized (RPO)** as specified [here](https://eprint.iacr.org/2022/1577) and implemented in this crate.
* **Rescue Prime Extended (RPX)** a variant of the [xHash](https://eprint.iacr.org/2023/1045) hash function as implemented in this crate.

## Comparison and Instructions
### Comparison and Instructions

### Comparison
#### Comparison
We benchmark the above hash functions using two scenarios. The first is 2-to-1 $(a,b)\mapsto h(a,b)$ hashing, where $a$, $b$, and $h(a,b)$ are the digests corresponding to each of the hash functions.
The second scenario is sequential hashing, where we take a sequence of $100$ field elements and hash these to produce a single digest. The digests are $4$ field elements in a prime field with modulus $2^{64} - 2^{32} + 1$ (i.e., 32 bytes) for Poseidon, Rescue Prime and RPO, and an array `[u8; 32]` for SHA3 and BLAKE3.
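For concreteness, the two scenarios map onto two hashing calls in this crate. The sketch below is a rough harness; the `merge` and `hash_elements` names follow the winterfell hasher traits this crate implements and should be treated as assumptions.

```rust
// Rough sketch of the two benchmark scenarios using RPO256.
use miden_crypto::{hash::rpo::Rpo256, Felt};

fn main() {
    // Scenario 1: 2-to-1 hashing h(a, b), where a and b are digests.
    let a = Rpo256::hash_elements(&[Felt::new(1)]);
    let b = Rpo256::hash_elements(&[Felt::new(2)]);
    let h_ab = Rpo256::merge(&[a, b]);

    // Scenario 2: sequential hashing of 100 field elements into one digest.
    let sequence: Vec<Felt> = (0..100u64).map(Felt::new).collect();
    let h_seq = Rpo256::hash_elements(&sequence);

    println!("{h_ab:?} {h_seq:?}");
}
```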
#### Scenario 1: 2-to-1 hashing `h(a,b)`
##### Scenario 1: 2-to-1 hashing `h(a,b)`

| Function            | BLAKE3 | SHA3    | Poseidon  | Rp64_256  | RPO_256 | RPX_256 |
| ------------------- | ------ | ------- | --------- | --------- | ------- | ------- |
@@ -26,7 +28,7 @@ The second scenario is that of sequential hashing where we take a sequence of le
| Intel Core i5-8279U | 68 ns  | 536 ns  | 2.0 µs    | 13.6 µs   | 8.5 µs  | 4.4 µs  |
| Intel Xeon 8375C    | 67 ns  |         |           |           | 8.2 µs  |         |

#### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])`
##### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])`

| Function            | BLAKE3 | SHA3    | Poseidon  | Rp64_256  | RPO_256 | RPX_256 |
| ------------------- | ------ | ------- | --------- | --------- | ------- | ------- |
@@ -42,7 +44,7 @@ Notes:
- On Graviton 3, RPO256 and RPX256 are run with SVE acceleration enabled.
- On AMD EPYC 9R14, RPO256 and RPX256 are run with AVX2 acceleration enabled.

### Instructions
#### Instructions
Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). After that, to run the benchmarks for RPO and BLAKE3, clone the current repository, and from the root directory of the repo run the following:

```
@@ -54,3 +56,47 @@ To run the benchmarks for Rescue Prime, Poseidon and SHA3, clone the following [
```
cargo bench hash
```

## Miden VM DSA

We make use of the following digital signature algorithms (DSA) in the Miden VM:

* **RPO-Falcon512** as specified [here](https://falcon-sign.info/falcon.pdf), with the one difference being the use of the RPO hash function for the hash-to-point algorithm (Algorithm 3 in the previous reference) instead of SHAKE256.
* **RPO-STARK** as specified [here](https://eprint.iacr.org/2024/1553), where the parameters are the ones for the unique-decoding regime (UDR), with two differences:
  * We rely on Conjecture 1 in the [ethSTARK](https://eprint.iacr.org/2021/582) paper.
  * The number of FRI queries is $30$ and the grinding factor is $12$ bits. Thus, using the previous point, we can argue that the modified version achieves at least $102$ bits of average-case existential unforgeability security against $2^{113}$-query bound adversaries that can obtain up to $2^{64}$ signatures under the same public key. A rough arithmetic check of this figure follows below.
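The quoted figure can be sanity-checked with back-of-the-envelope arithmetic: under the conjecture, each FRI query contributes roughly $\log_2(\text{blowup factor})$ bits, the blowup factor used by this scheme is $8$ (see `PROOF_OPTIONS` in `src/dsa/rpo_stark/signature/mod.rs`), and grinding adds its bit count directly:

$$30 \cdot \log_2 8 + 12 = 30 \cdot 3 + 12 = 102 \text{ bits.}$$

This is an informal reading of the conjectured soundness, not a substitute for the analysis in the referenced papers.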
### Comparison and Instructions

#### Comparison

##### Key Generation

| DSA                 | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro        | 590 ms        | 6 µs      |
| Intel Core i5-8279U | 585 ms        | 10 µs     |

##### Signature Generation

| DSA                 | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro        | 1.5 ms        | 78 ms     |
| Intel Core i5-8279U | 1.8 ms        | 130 ms    |

##### Signature Verification

| DSA                 | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro        | 0.7 ms        | 4.5 ms    |
| Intel Core i5-8279U | 1.2 ms        | 7.9 ms    |

#### Instructions
Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). After that, to run the benchmarks, clone the current repository, and from the root directory of the repo run the following:

```
cargo bench --bench dsa
```
88
benches/dsa.rs
Normal file
@@ -0,0 +1,88 @@
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use miden_crypto::dsa::{
    rpo_falcon512::SecretKey as FalconSecretKey, rpo_stark::SecretKey as RpoStarkSecretKey,
};
use rand_utils::rand_array;

fn key_gen_falcon(c: &mut Criterion) {
    c.bench_function("Falcon public key generation", |bench| {
        bench.iter_batched(|| FalconSecretKey::new(), |sk| sk.public_key(), BatchSize::SmallInput)
    });

    c.bench_function("Falcon secret key generation", |bench| {
        bench.iter_batched(|| {}, |_| FalconSecretKey::new(), BatchSize::SmallInput)
    });
}

fn key_gen_rpo_stark(c: &mut Criterion) {
    c.bench_function("RPO-STARK public key generation", |bench| {
        bench.iter_batched(
            || RpoStarkSecretKey::random(),
            |sk| sk.public_key(),
            BatchSize::SmallInput,
        )
    });

    c.bench_function("RPO-STARK secret key generation", |bench| {
        bench.iter_batched(|| {}, |_| RpoStarkSecretKey::random(), BatchSize::SmallInput)
    });
}

fn signature_gen_falcon(c: &mut Criterion) {
    c.bench_function("Falcon signature generation", |bench| {
        bench.iter_batched(
            || (FalconSecretKey::new(), rand_array().into()),
            |(sk, msg)| sk.sign(msg),
            BatchSize::SmallInput,
        )
    });
}

fn signature_gen_rpo_stark(c: &mut Criterion) {
    c.bench_function("RPO-STARK signature generation", |bench| {
        bench.iter_batched(
            || (RpoStarkSecretKey::random(), rand_array().into()),
            |(sk, msg)| sk.sign(msg),
            BatchSize::SmallInput,
        )
    });
}

fn signature_ver_falcon(c: &mut Criterion) {
    c.bench_function("Falcon signature verification", |bench| {
        bench.iter_batched(
            || {
                let sk = FalconSecretKey::new();
                let msg = rand_array().into();
                (sk.public_key(), msg, sk.sign(msg))
            },
            |(pk, msg, sig)| pk.verify(msg, &sig),
            BatchSize::SmallInput,
        )
    });
}

fn signature_ver_rpo_stark(c: &mut Criterion) {
    c.bench_function("RPO-STARK signature verification", |bench| {
        bench.iter_batched(
            || {
                let sk = RpoStarkSecretKey::random();
                let msg = rand_array().into();
                (sk.public_key(), msg, sk.sign(msg))
            },
            |(pk, msg, sig)| pk.verify(msg, &sig),
            BatchSize::SmallInput,
        )
    });
}

criterion_group!(
    dsa_group,
    key_gen_falcon,
    key_gen_rpo_stark,
    signature_gen_falcon,
    signature_gen_rpo_stark,
    signature_ver_falcon,
    signature_ver_rpo_stark
);
criterion_main!(dsa_group);
66
benches/merkle.rs
Normal file
@@ -0,0 +1,66 @@
//! Benchmark for building a [`miden_crypto::merkle::MerkleTree`]. This is intended to be compared
//! with the results from `benches/smt-subtree.rs`, as building a fully balanced Merkle tree with
//! 256 leaves should indicate the *absolute best* performance we could *possibly* get for building
//! a depth-8 sparse Merkle subtree, though practically speaking building a fully balanced Merkle
//! tree will perform better than the sparse version. At the time of this writing (2024/11/24), this
//! benchmark is about four times more efficient than the equivalent benchmark in
//! `benches/smt-subtree.rs`.
use std::{hint, mem, time::Duration};

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use miden_crypto::{merkle::MerkleTree, Felt, Word, ONE};
use rand_utils::prng_array;

fn balanced_merkle_even(c: &mut Criterion) {
    c.bench_function("balanced-merkle-even", |b| {
        b.iter_batched(
            || {
                let entries: Vec<Word> =
                    (0..256).map(|i| [Felt::new(i), ONE, ONE, Felt::new(i)]).collect();
                assert_eq!(entries.len(), 256);
                entries
            },
            |leaves| {
                let tree = MerkleTree::new(hint::black_box(leaves)).unwrap();
                assert_eq!(tree.depth(), 8);
            },
            BatchSize::SmallInput,
        );
    });
}

fn balanced_merkle_rand(c: &mut Criterion) {
    let mut seed = [0u8; 32];
    c.bench_function("balanced-merkle-rand", |b| {
        b.iter_batched(
            || {
                let entries: Vec<Word> = (0..256).map(|_| generate_word(&mut seed)).collect();
                assert_eq!(entries.len(), 256);
                entries
            },
            |leaves| {
                let tree = MerkleTree::new(hint::black_box(leaves)).unwrap();
                assert_eq!(tree.depth(), 8);
            },
            BatchSize::SmallInput,
        );
    });
}

criterion_group! {
    name = smt_subtree_group;
    config = Criterion::default()
        .measurement_time(Duration::from_secs(20))
        .configure_from_args();
    targets = balanced_merkle_even, balanced_merkle_rand
}
criterion_main!(smt_subtree_group);

// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------

fn generate_word(seed: &mut [u8; 32]) -> Word {
    mem::swap(seed, &mut prng_array(*seed));
    let nums: [u64; 4] = prng_array(*seed);
    [Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}
142
benches/smt-subtree.rs
Normal file
@@ -0,0 +1,142 @@
use std::{fmt::Debug, hint, mem, time::Duration};

use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use miden_crypto::{
    hash::rpo::RpoDigest,
    merkle::{build_subtree_for_bench, NodeIndex, SmtLeaf, SubtreeLeaf, SMT_DEPTH},
    Felt, Word, ONE,
};
use rand_utils::prng_array;
use winter_utils::Randomizable;

const PAIR_COUNTS: [u64; 5] = [1, 64, 128, 192, 256];

fn smt_subtree_even(c: &mut Criterion) {
    let mut seed = [0u8; 32];

    let mut group = c.benchmark_group("subtree8-even");

    for pair_count in PAIR_COUNTS {
        let bench_id = BenchmarkId::from_parameter(pair_count);
        group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
            b.iter_batched(
                || {
                    // Setup.
                    let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
                        .map(|n| {
                            // A single depth-8 subtree can have a maximum of 255 leaves.
                            let leaf_index = ((n as f64 / pair_count as f64) * 255.0) as u64;
                            let key = RpoDigest::new([
                                generate_value(&mut seed),
                                ONE,
                                Felt::new(n),
                                Felt::new(leaf_index),
                            ]);
                            let value = generate_word(&mut seed);
                            (key, value)
                        })
                        .collect();

                    let mut leaves: Vec<_> = entries
                        .iter()
                        .map(|(key, value)| {
                            let leaf = SmtLeaf::new_single(*key, *value);
                            let col = NodeIndex::from(leaf.index()).value();
                            let hash = leaf.hash();
                            SubtreeLeaf { col, hash }
                        })
                        .collect();
                    leaves.sort();
                    leaves.dedup_by_key(|leaf| leaf.col);
                    leaves
                },
                |leaves| {
                    // Benchmarked function.
                    let (subtree, _) = build_subtree_for_bench(
                        hint::black_box(leaves),
                        hint::black_box(SMT_DEPTH),
                        hint::black_box(SMT_DEPTH),
                    );
                    assert!(!subtree.is_empty());
                },
                BatchSize::SmallInput,
            );
        });
    }
}

fn smt_subtree_random(c: &mut Criterion) {
    let mut seed = [0u8; 32];

    let mut group = c.benchmark_group("subtree8-rand");

    for pair_count in PAIR_COUNTS {
        let bench_id = BenchmarkId::from_parameter(pair_count);
        group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
            b.iter_batched(
                || {
                    // Setup.
                    let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
                        .map(|i| {
                            let leaf_index: u8 = generate_value(&mut seed);
                            let key = RpoDigest::new([
                                ONE,
                                ONE,
                                Felt::new(i),
                                Felt::new(leaf_index as u64),
                            ]);
                            let value = generate_word(&mut seed);
                            (key, value)
                        })
                        .collect();

                    let mut leaves: Vec<_> = entries
                        .iter()
                        .map(|(key, value)| {
                            let leaf = SmtLeaf::new_single(*key, *value);
                            let col = NodeIndex::from(leaf.index()).value();
                            let hash = leaf.hash();
                            SubtreeLeaf { col, hash }
                        })
                        .collect();
                    leaves.sort();
                    leaves
                },
                |leaves| {
                    let (subtree, _) = build_subtree_for_bench(
                        hint::black_box(leaves),
                        hint::black_box(SMT_DEPTH),
                        hint::black_box(SMT_DEPTH),
                    );
                    assert!(!subtree.is_empty());
                },
                BatchSize::SmallInput,
            );
        });
    }
}

criterion_group! {
    name = smt_subtree_group;
    config = Criterion::default()
        .measurement_time(Duration::from_secs(40))
        .sample_size(60)
        .configure_from_args();
    targets = smt_subtree_even, smt_subtree_random
}
criterion_main!(smt_subtree_group);

// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------

fn generate_value<T: Copy + Debug + Randomizable>(seed: &mut [u8; 32]) -> T {
    mem::swap(seed, &mut prng_array(*seed));
    let value: [T; 1] = rand_utils::prng_array(*seed);
    value[0]
}

fn generate_word(seed: &mut [u8; 32]) -> Word {
    mem::swap(seed, &mut prng_array(*seed));
    let nums: [u64; 4] = prng_array(*seed);
    [Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}
71
benches/smt-with-entries.rs
Normal file
@@ -0,0 +1,71 @@
use std::{fmt::Debug, hint, mem, time::Duration};

use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use miden_crypto::{hash::rpo::RpoDigest, merkle::Smt, Felt, Word, ONE};
use rand_utils::prng_array;
use winter_utils::Randomizable;

// 2^0, 2^4, 2^8, 2^12, 2^16, 2^20
const PAIR_COUNTS: [u64; 6] = [1, 16, 256, 4096, 65536, 1_048_576];

fn smt_with_entries(c: &mut Criterion) {
    let mut seed = [0u8; 32];

    let mut group = c.benchmark_group("smt-with-entries");

    for pair_count in PAIR_COUNTS {
        let bench_id = BenchmarkId::from_parameter(pair_count);
        group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
            b.iter_batched(
                || {
                    // Setup.
                    prepare_entries(pair_count, &mut seed)
                },
                |entries| {
                    // Benchmarked function.
                    Smt::with_entries(hint::black_box(entries)).unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
}

criterion_group! {
    name = smt_with_entries_group;
    config = Criterion::default()
        //.measurement_time(Duration::from_secs(960))
        .measurement_time(Duration::from_secs(60))
        .sample_size(10)
        .configure_from_args();
    targets = smt_with_entries
}
criterion_main!(smt_with_entries_group);

// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------

fn prepare_entries(pair_count: u64, seed: &mut [u8; 32]) -> Vec<(RpoDigest, [Felt; 4])> {
    let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
        .map(|i| {
            let count = pair_count as f64;
            let idx = ((i as f64 / count) * (count)) as u64;
            let key = RpoDigest::new([generate_value(seed), ONE, Felt::new(i), Felt::new(idx)]);
            let value = generate_word(seed);
            (key, value)
        })
        .collect();
    entries
}

fn generate_value<T: Copy + Debug + Randomizable>(seed: &mut [u8; 32]) -> T {
    mem::swap(seed, &mut prng_array(*seed));
    let value: [T; 1] = rand_utils::prng_array(*seed);
    value[0]
}

fn generate_word(seed: &mut [u8; 32]) -> Word {
    mem::swap(seed, &mut prng_array(*seed));
    let nums: [u64; 4] = prng_array(*seed);
    [Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}
@@ -1 +0,0 @@
1.80
5
rust-toolchain.toml
Normal file
@@ -0,0 +1,5 @@
[toolchain]
channel = "1.82"
components = ["rustfmt", "rust-src", "clippy"]
targets = ["wasm32-unknown-unknown"]
profile = "minimal"
24
rustfmt.toml
@@ -2,20 +2,22 @@ edition = "2021"
array_width = 80
attr_fn_like_width = 80
chain_width = 80
#condense_wildcard_suffixes = true
#enum_discrim_align_threshold = 40
comment_width = 100
condense_wildcard_suffixes = true
fn_call_width = 80
#fn_single_line = true
#format_code_in_doc_comments = true
#format_macro_matchers = true
#format_strings = true
#group_imports = "StdExternalCrate"
#hex_literal_case = "Lower"
#imports_granularity = "Crate"
format_code_in_doc_comments = true
format_macro_matchers = true
group_imports = "StdExternalCrate"
hex_literal_case = "Lower"
imports_granularity = "Crate"
match_block_trailing_comma = true
newline_style = "Unix"
#normalize_doc_attributes = true
#reorder_impl_items = true
reorder_imports = true
reorder_modules = true
single_line_if_else_max_width = 60
single_line_let_else_max_width = 60
struct_lit_width = 40
struct_variant_width = 40
use_field_init_shorthand = true
use_try_shorthand = true
wrap_comments = true
21
scripts/check-changelog.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
set -uo pipefail

CHANGELOG_FILE="${1:-CHANGELOG.md}"

if [ "${NO_CHANGELOG_LABEL}" = "true" ]; then
    # 'no changelog' set, so finish successfully
    echo "\"no changelog\" label has been set"
    exit 0
else
    # a changelog check is required
    # fail if the diff is empty
    if git diff --exit-code "origin/${BASE_REF}" -- "${CHANGELOG_FILE}"; then
        >&2 echo "Changes should come with an entry in the \"CHANGELOG.md\" file. This behavior
can be overridden by using the \"no changelog\" label, which is used for changes
that are trivial / explicitly stated not to require a changelog entry."
        exit 1
    fi

    echo "The \"CHANGELOG.md\" file has been updated."
fi
@@ -1,10 +1,12 @@
#!/bin/bash

# Check rust-toolchain file
TOOLCHAIN_VERSION=$(cat rust-toolchain)
# Get rust-toolchain.toml file channel
TOOLCHAIN_VERSION=$(grep 'channel' rust-toolchain.toml | sed -E 's/.*"(.*)".*/\1/')

# Check workspace Cargo.toml file
CARGO_VERSION=$(cat Cargo.toml | grep "rust-version" | cut -d '"' -f 2)
# Get workspace Cargo.toml file rust-version
CARGO_VERSION=$(grep 'rust-version' Cargo.toml | sed -E 's/.*"(.*)".*/\1/')

# Check version match
if [ "$CARGO_VERSION" != "$TOOLCHAIN_VERSION" ]; then
    echo "Mismatch in Cargo.toml: Expected $TOOLCHAIN_VERSION, found $CARGO_VERSION"
    exit 1
@@ -1,3 +1,5 @@
//! Digital signature schemes supported by default in the Miden VM.

pub mod rpo_falcon512;

pub mod rpo_stark;
@@ -1,7 +1,9 @@
use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};
use alloc::vec::Vec;

use num::Zero;

use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};

// HASH-TO-POINT FUNCTIONS
// ================================================================================================
@@ -15,12 +15,13 @@ pub use secret_key::SecretKey;

#[cfg(test)]
mod tests {
    use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};
    use rand::SeedableRng;
    use rand_chacha::ChaCha20Rng;
    use winter_math::FieldElement;
    use winter_utils::{Deserializable, Serializable};

    use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};

    #[test]
    fn test_falcon_verification() {
        let seed = [0_u8; 32];
@@ -1,13 +1,14 @@
use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;
use alloc::string::ToString;
use core::ops::Deref;

use num::Zero;

use super::{
    super::{Rpo256, LOG_N, N, PK_LEN},
    ByteReader, ByteWriter, Deserializable, DeserializationError, FalconFelt, Felt, Polynomial,
    Serializable, Signature, Word,
};
use alloc::string::ToString;
use core::ops::Deref;
use num::Zero;
use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;

// PUBLIC KEY
// ================================================================================================
@@ -116,7 +117,7 @@ impl Deserializable for PubKeyPoly {

        if acc_len >= FALCON_ENCODING_BITS {
            acc_len -= FALCON_ENCODING_BITS;
            let w = (acc >> acc_len) & 0x3FFF;
            let w = (acc >> acc_len) & 0x3fff;
            let element = w.try_into().map_err(|err| {
                DeserializationError::InvalidValue(format!(
                    "Failed to decode public key: {err}"
@@ -1,3 +1,11 @@
use alloc::{string::ToString, vec::Vec};

use num::Complex;
#[cfg(not(feature = "std"))]
use num::Float;
use num_complex::Complex64;
use rand::Rng;

use super::{
    super::{
        math::{ffldl, ffsampling, gram, normalize_tree, FalconFelt, FastFft, LdlTree, Polynomial},
@@ -10,13 +18,6 @@ use super::{
use crate::dsa::rpo_falcon512::{
    hash_to_point::hash_to_point_rpo256, math::ntru_gen, SIG_NONCE_LEN, SK_LEN,
};
use alloc::{string::ToString, vec::Vec};
use num::Complex;
use num_complex::Complex64;
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;

// CONSTANTS
// ================================================================================================
@@ -27,8 +28,10 @@ const WIDTH_SMALL_POLY_COEFFICIENT: usize = 6;
// SECRET KEY
// ================================================================================================

/// Represents the secret key for Falcon DSA.
///
/// The secret key is a quadruple [[g, -f], [G, -F]] of polynomials with integer coefficients. Each
/// polynomial is of degree at most N = 512 and computations with these polynomials are done modulo
/// polynomial is of degree at most N = 512 and computations with these polynomials is done modulo
/// the monic irreducible polynomial ϕ = x^N + 1. The secret key is a basis for a lattice and has
/// the property of being short with respect to a certain norm and an upper bound appropriate for
/// a given security parameter. The public key on the other hand is another basis for the same
@@ -1,11 +1,12 @@
use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
use alloc::boxed::Box;

#[cfg(not(feature = "std"))]
use num::Float;
use num::{One, Zero};
use num_complex::{Complex, Complex64};
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;
use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};

const SIGMIN: f64 = 1.2778336969128337;

@@ -80,11 +81,11 @@ pub fn normalize_tree(tree: &mut LdlTree, sigma: f64) {
        LdlTree::Branch(_ell, left, right) => {
            normalize_tree(left, sigma);
            normalize_tree(right, sigma);
        }
        },
        LdlTree::Leaf(vector) => {
            vector[0] = Complex::new(sigma / vector[0].re.sqrt(), 0.0);
            vector[1] = Complex64::zero();
        }
        },
    }
}
@@ -110,7 +111,7 @@ pub fn ffsampling<R: Rng>(
            let z0 = Polynomial::<Complex64>::merge_fft(&bold_z0.0, &bold_z0.1);

            (z0, z1)
        }
        },
        LdlTree::Leaf(value) => {
            let z0 = sampler_z(t.0.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
            let z1 = sampler_z(t.1.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
@@ -118,6 +119,6 @@ pub fn ffsampling<R: Rng>(
                Polynomial::new(vec![Complex64::new(z0 as f64, 0.0)]),
                Polynomial::new(vec![Complex64::new(z1 as f64, 0.0)]),
            )
        }
        },
    }
}
@@ -1,14 +1,15 @@
use super::{field::FalconFelt, polynomial::Polynomial, Inverse};
use alloc::vec::Vec;
use core::{
    f64::consts::PI,
    ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};
use num::{One, Zero};
use num_complex::Complex64;

#[cfg(not(feature = "std"))]
use num::Float;
use num::{One, Zero};
use num_complex::Complex64;

use super::{field::FalconFelt, polynomial::Polynomial, Inverse};

/// Implements Cyclotomic FFT without bitreversing the outputs, and using precomputed powers of the
/// 2n-th primitive root of unity.
@@ -73,7 +74,7 @@ where
        rev
    }

    /// Computes the first n powers of the 2n-th root of unity, and puts them in bit-reversed order.
    /// Computes the first n powers of the 2n-th root of unity, and puts them in bit-reversed
    /// order.
    #[allow(dead_code)]
    fn bitreversed_powers(n: usize) -> Vec<Self> {
        let psi = Self::primitive_root_of_unity(2 * n);
@@ -87,7 +88,7 @@ where
        array
    }

    /// Computes the first n powers of the 2n-th root of unity, inverts them, and puts them in
    /// bit-reversed order.
    #[allow(dead_code)]
    fn bitreversed_powers_inverse(n: usize) -> Vec<Self> {
@@ -102,7 +103,8 @@ where
        array
    }

    /// Reorders the given elements in the array by reversing the binary expansions of their indices.
    /// Reorders the given elements in the array by reversing the binary expansions of their
    /// indices.
    fn bitreverse_array<T>(array: &mut [T]) {
        let n = array.len();
        for i in 0..n {
@@ -118,19 +120,14 @@ where
    ///
    /// Arguments:
    ///
    /// - a : &mut [Self]
    ///   (a reference to) a mutable array of field elements which is to
    ///   be transformed under the FFT. The transformation happens
    ///   in-place.
    /// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
    ///   transformed under the FFT. The transformation happens in-place.
    ///
    /// - psi_rev: &[Self]
    ///   (a reference to) an array of powers of psi, from 0 to n-1,
    ///   but ordered by bit-reversed index. Here psi is a primitive root
    ///   of order 2n. You can use
    ///   `Self::bitreversed_powers(psi, n)` for this purpose, but this
    ///   trait implementation is not const. For the performance benefit
    ///   you want a precompiled array, which you can get
    ///   by implementing the same method and marking it "const".
    /// - psi_rev: &[Self] (a reference to) an array of powers of psi, from 0 to n-1, but ordered
    ///   by bit-reversed index. Here psi is a primitive root of order 2n. You can use
    ///   `Self::bitreversed_powers(psi, n)` for this purpose, but this trait implementation is not
    ///   const. For the performance benefit you want a precompiled array, which you can get by
    ///   implementing the same method and marking it "const".
    fn fft(a: &mut [Self], psi_rev: &[Self]) {
        let n = a.len();
        let mut t = n;
@@ -158,20 +155,15 @@ where
    ///
    /// Arguments:
    ///
    /// - a : &mut [Self]
    ///   (a reference to) a mutable array of field elements which is to
    ///   be transformed under the IFFT. The transformation happens
    ///   in-place.
    /// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
    ///   transformed under the IFFT. The transformation happens in-place.
    ///
    /// - psi_inv_rev: &[Self]
    ///   (a reference to) an array of powers of psi^-1, from 0 to n-1,
    ///   but ordered by bit-reversed index. Here psi is a primitive root of
    ///   order 2n. You can use
    ///   `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for
    ///   this purpose, but this trait implementation is not const. For
    ///   the performance benefit you want a precompiled array, which you
    ///   can get by implementing the same methods and marking them "const".
    /// - psi_inv_rev: &[Self] (a reference to) an array of powers of psi^-1, from 0 to n-1, but
    ///   ordered by bit-reversed index. Here psi is a primitive root of order 2n. You can use
    ///   `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for this purpose, but this
    ///   trait implementation is not const. For the performance benefit you want a precompiled
    ///   array, which you can get by implementing the same methods and marking them "const".
    fn ifft(a: &mut [Self], psi_inv_rev: &[Self], ninv: Self) {
        let n = a.len();
        let mut t = 1;
@@ -1,8 +1,10 @@
use super::{fft::CyclotomicFourier, Inverse, MODULUS};
use alloc::string::String;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};

use num::{One, Zero};

use super::{fft::CyclotomicFourier, Inverse, MODULUS};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct FalconFelt(u32);
@@ -2,17 +2,19 @@
//!
//! It uses and acknowledges the work in:
//!
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas Pornin.
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas
//!    Pornin.
//! 2. The [Rust](https://github.com/aszepieniec/falcon-rust) implementation by Alan Szepieniec.
use super::MODULUS;
use alloc::{string::String, vec::Vec};
use core::ops::MulAssign;

#[cfg(not(feature = "std"))]
use num::Float;
use num::{BigInt, FromPrimitive, One, Zero};
use num_complex::Complex64;
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;
use super::MODULUS;

mod fft;
pub use fft::{CyclotomicFourier, FastFft};
@@ -152,7 +154,7 @@ fn ntru_solve(
            {
                None
            }
        }
        },
    }
}
@@ -1,12 +1,18 @@
use super::{field::FalconFelt, Inverse};
use crate::dsa::rpo_falcon512::{MODULUS, N};
use crate::Felt;
use alloc::vec::Vec;
use core::default::Default;
use core::fmt::Debug;
use core::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign};
use core::{
    default::Default,
    fmt::Debug,
    ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};

use num::{One, Zero};

use super::{field::FalconFelt, Inverse};
use crate::{
    dsa::rpo_falcon512::{MODULUS, N},
    Felt,
};

#[derive(Debug, Clone, Default)]
pub struct Polynomial<F> {
    pub coefficients: Vec<F>,
@@ -134,8 +140,8 @@ impl<
        Self::new(coefficients)
    }

    /// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 > ,
    /// which corresponds to f(x^2).
    /// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 >
    /// , which corresponds to f(x^2).
    pub fn galois_adjoint(&self) -> Self {
        Self::new(
            self.coefficients
@@ -1,8 +1,8 @@
use core::f64::consts::LN_2;
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;
use rand::Rng;

/// Samples an integer from {0, ..., 18} according to the distribution χ, which is close to
/// the half-Gaussian distribution on the natural numbers with mean 0 and standard deviation
@@ -40,18 +40,18 @@ fn approx_exp(x: f64, ccs: f64) -> u64 {
    // https://eprint.iacr.org/2018/1234
    // https://github.com/raykzhao/gaussian
    const C: [u64; 13] = [
        0x00000004741183A3u64,
        0x00000036548CFC06u64,
        0x0000024FDCBF140Au64,
        0x0000171D939DE045u64,
        0x0000D00CF58F6F84u64,
        0x000680681CF796E3u64,
        0x002D82D8305B0FEAu64,
        0x011111110E066FD0u64,
        0x0555555555070F00u64,
        0x155555555581FF00u64,
        0x400000000002B400u64,
        0x7FFFFFFFFFFF4800u64,
        0x00000004741183a3u64,
        0x00000036548cfc06u64,
        0x0000024fdcbf140au64,
        0x0000171d939de045u64,
        0x0000d00cf58f6f84u64,
        0x000680681cf796e3u64,
        0x002d82d8305b0feau64,
        0x011111110e066fd0u64,
        0x0555555555070f00u64,
        0x155555555581ff00u64,
        0x400000000002b400u64,
        0x7fffffffffff4800u64,
        0x8000000000000000u64,
    ];
@@ -116,9 +116,10 @@ pub(crate) fn sampler_z<R: Rng>(mu: f64, sigma: f64, sigma_min: f64, rng: &mut R
#[cfg(all(test, feature = "std"))]
mod test {
    use alloc::vec::Vec;
    use rand::RngCore;
    use std::{thread::sleep, time::Duration};

    use rand::RngCore;

    use super::{approx_exp, ber_exp, sampler_z};

    /// RNG used only for testing purposes, whereby the produced
@@ -9,9 +9,11 @@ mod keys;
mod math;
mod signature;

pub use self::keys::{PubKeyPoly, PublicKey, SecretKey};
pub use self::math::Polynomial;
pub use self::signature::{Signature, SignatureHeader, SignaturePoly};
pub use self::{
    keys::{PubKeyPoly, PublicKey, SecretKey},
    math::Polynomial,
    signature::{Signature, SignatureHeader, SignaturePoly},
};

// CONSTANTS
// ================================================================================================
@@ -1,6 +1,8 @@
use alloc::{string::ToString, vec::Vec};
use core::ops::Deref;

use num::Zero;

use super::{
    hash_to_point::hash_to_point_rpo256,
    keys::PubKeyPoly,
@@ -8,7 +10,6 @@ use super::{
    ByteReader, ByteWriter, Deserializable, DeserializationError, Felt, Nonce, Rpo256,
    Serializable, Word, LOG_N, MODULUS, N, SIG_L2_BOUND, SIG_POLY_BYTE_LEN,
};
use num::Zero;

// FALCON SIGNATURE
// ================================================================================================
@@ -38,8 +39,8 @@ use num::Zero;
/// The signature is serialized as:
/// 1. A header byte specifying the algorithm used to encode the coefficients of the `s2` polynomial
///    together with the degree of the irreducible polynomial phi. For RPO Falcon512, the header
///    byte is set to `10111001` which differentiates it from the standardized instantiation of
///    the Falcon signature.
///    byte is set to `10111001` which differentiates it from the standardized instantiation of the
///    Falcon signature.
/// 2. 40 bytes for the nonce.
/// 3. 625 bytes encoding the `s2` polynomial above.
///
@@ -355,10 +356,11 @@ fn are_coefficients_valid(x: &[i16]) -> bool {

#[cfg(test)]
mod tests {
    use super::{super::SecretKey, *};
    use rand::SeedableRng;
    use rand_chacha::ChaCha20Rng;

    use super::{super::SecretKey, *};

    #[test]
    fn test_serialization_round_trip() {
        let seed = [0_u8; 32];
24
src/dsa/rpo_stark/mod.rs
Normal file
@@ -0,0 +1,24 @@
mod signature;
pub use signature::{PublicKey, SecretKey, Signature};

mod stark;
pub use stark::{PublicInputs, RescueAir};

// TESTS
// ================================================================================================

#[cfg(test)]
mod tests {
    use super::SecretKey;
    use crate::Word;

    #[test]
    fn test_signature() {
        let sk = SecretKey::new(Word::default());

        let message = Word::default();
        let signature = sk.sign(message);
        let pk = sk.public_key();
        assert!(pk.verify(message, &signature))
    }
}
173
src/dsa/rpo_stark/signature/mod.rs
Normal file
173
src/dsa/rpo_stark/signature/mod.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
use rand::{distributions::Uniform, prelude::Distribution, Rng};
|
||||
use winter_air::{FieldExtension, ProofOptions};
|
||||
use winter_math::{fields::f64::BaseElement, FieldElement};
|
||||
use winter_prover::Proof;
|
||||
use winter_utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
|
||||
|
||||
use crate::{
|
||||
dsa::rpo_stark::stark::RpoSignatureScheme,
|
||||
hash::{rpo::Rpo256, DIGEST_SIZE},
|
||||
StarkField, Word, ZERO,
|
||||
};
|
||||
|
||||
// CONSTANTS
|
||||
// ================================================================================================
|
||||
|
||||
/// Specifies the parameters of the STARK underlying the signature scheme. These parameters provide
|
||||
/// at least 102 bits of security under the conjectured security of the toy protocol in
|
||||
/// the ethSTARK paper [1].
|
||||
///
|
||||
/// [1]: https://eprint.iacr.org/2021/582
|
||||
pub const PROOF_OPTIONS: ProofOptions =
|
||||
ProofOptions::new(30, 8, 12, FieldExtension::Quadratic, 4, 7, true);
|
||||
|
||||
// PUBLIC KEY
|
||||
// ================================================================================================
|
||||
|
||||
/// A public key for verifying signatures.
|
||||
///
|
||||
/// The public key is a [Word] (i.e., 4 field elements) that is the hash of the secret key.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct PublicKey(Word);
|
||||
|
||||
impl PublicKey {
|
||||
/// Returns the [Word] defining the public key.
|
||||
pub fn inner(&self) -> Word {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl PublicKey {
|
||||
/// Verifies the provided signature against provided message and this public key.
|
||||
pub fn verify(&self, message: Word, signature: &Signature) -> bool {
|
||||
signature.verify(message, *self)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serializable for PublicKey {
|
||||
fn write_into<W: ByteWriter>(&self, target: &mut W) {
|
||||
self.0.write_into(target);
|
||||
}
|
||||
}
|
||||
|
||||
impl Deserializable for PublicKey {
|
||||
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
|
||||
let pk = <Word>::read_from(source)?;
|
||||
Ok(Self(pk))
|
||||
}
|
||||
}
|
||||
|
||||
// SECRET KEY
|
||||
// ================================================================================================
|
||||
|
||||
/// A secret key for generating signatures.
|
||||
///
|
||||
/// The secret key is a [Word] (i.e., 4 field elements).
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct SecretKey(Word);
|
||||
|
||||
impl SecretKey {
|
||||
/// Generates a secret key from OS-provided randomness.
|
||||
pub fn new(word: Word) -> Self {
|
||||
Self(word)
|
||||
}
|
||||
|
||||
/// Generates a secret key from a [Word].
|
||||
#[cfg(feature = "std")]
|
||||
pub fn random() -> Self {
|
||||
use rand::{rngs::StdRng, SeedableRng};
|
||||
|
||||
let mut rng = StdRng::from_entropy();
|
||||
Self::with_rng(&mut rng)
|
||||
}
|
||||
|
||||
/// Generates a secret_key using the provided random number generator `Rng`.
|
||||
pub fn with_rng<R: Rng>(rng: &mut R) -> Self {
|
||||
let mut sk = [ZERO; 4];
|
||||
let uni_dist = Uniform::from(0..BaseElement::MODULUS);
|
||||
|
||||
for s in sk.iter_mut() {
|
||||
let sampled_integer = uni_dist.sample(rng);
|
||||
*s = BaseElement::new(sampled_integer);
|
||||
}
|
||||
|
||||
Self(sk)
|
||||
}
|
||||
|
||||
/// Computes the public key corresponding to this secret key.
|
||||
pub fn public_key(&self) -> PublicKey {
|
||||
let mut elements = [BaseElement::ZERO; 8];
|
||||
elements[..DIGEST_SIZE].copy_from_slice(&self.0);
|
||||
let pk = Rpo256::hash_elements(&elements);
|
||||
PublicKey(pk.into())
|
||||
}
|
||||
|
||||
/// Signs a message with this secret key.
|
||||
pub fn sign(&self, message: Word) -> Signature {
|
||||
let signature: RpoSignatureScheme<Rpo256> = RpoSignatureScheme::new(PROOF_OPTIONS);
|
||||
let proof = signature.sign(self.0, message);
|
||||
Signature { proof }
|
||||
}
|
||||
}
|
||||
|
||||
impl Serializable for SecretKey {
|
||||
fn write_into<W: ByteWriter>(&self, target: &mut W) {
|
||||
self.0.write_into(target);
|
||||
}
|
||||
}
|
||||
|
||||
impl Deserializable for SecretKey {
|
||||
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
|
||||
let sk = <Word>::read_from(source)?;
|
||||
Ok(Self(sk))
|
||||
}
|
||||
}
|
||||
|
||||
// SIGNATURE
// ================================================================================================

/// An RPO STARK-based signature over a message.
///
/// The signature is a STARK proof of knowledge of a pre-image given an image, where the map is
/// the RPO permutation, the pre-image is the secret key, and the image is the public key.
/// The current implementation follows the description in [1] but relies on the conjectured
/// security of the toy protocol in the ethSTARK paper [2]. Using the parameter set given in
/// `PROOF_OPTIONS`, this yields a signature with $102$ bits of average-case existential
/// unforgeability security against $2^{113}$-query bound adversaries that can obtain up to
/// $2^{64}$ signatures under the same public key.
///
/// [1]: https://eprint.iacr.org/2024/1553
/// [2]: https://eprint.iacr.org/2021/582
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Signature {
    proof: Proof,
}

impl Signature {
    /// Returns the STARK proof constituting the signature.
    pub fn inner(&self) -> Proof {
        self.proof.clone()
    }

    /// Returns true if this signature is a valid signature for the specified message generated
    /// against the secret key matching the specified public key.
    pub fn verify(&self, message: Word, pk: PublicKey) -> bool {
        let signature: RpoSignatureScheme<Rpo256> = RpoSignatureScheme::new(PROOF_OPTIONS);

        let res = signature.verify(pk.inner(), message, self.proof.clone());
        res.is_ok()
    }
}

impl Serializable for Signature {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        self.proof.write_into(target);
    }
}

impl Deserializable for Signature {
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let proof = Proof::read_from(source)?;
        Ok(Self { proof })
    }
}
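
// Usage sketch (illustrative, not part of the diff): a minimal end-to-end round trip
// through the API above. `message` is a hypothetical input and the `std` feature is
// assumed for `SecretKey::random`. With the options listed in the seed-derivation context
// string later in this diff (30 FRI queries, blowup 8, 12 grinding bits), the rough
// conjectured bound 30 * log2(8) + 12 = 102 matches the security figure quoted in the
// `Signature` docs.
//
// let sk = SecretKey::random();
// let pk = sk.public_key();
// let message: Word = [ZERO; 4];
// let sig = sk.sign(message);
// assert!(sig.verify(message, pk));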
198 src/dsa/rpo_stark/stark/air.rs Normal file
@@ -0,0 +1,198 @@
use alloc::vec::Vec;

use winter_math::{fields::f64::BaseElement, FieldElement, ToElements};
use winter_prover::{
    Air, AirContext, Assertion, EvaluationFrame, ProofOptions, TraceInfo,
    TransitionConstraintDegree,
};

use crate::{
    hash::{ARK1, ARK2, MDS, STATE_WIDTH},
    Word, ZERO,
};

// CONSTANTS
// ================================================================================================

pub const HASH_CYCLE_LEN: usize = 8;

// AIR
// ================================================================================================

pub struct RescueAir {
    context: AirContext<BaseElement>,
    pub_key: Word,
}

impl Air for RescueAir {
    type BaseField = BaseElement;
    type PublicInputs = PublicInputs;

    type GkrProof = ();
    type GkrVerifier = ();

    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------
    fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
        let degrees = vec![
            // Apply RPO rounds.
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
            TransitionConstraintDegree::new(7),
        ];
        assert_eq!(STATE_WIDTH, trace_info.width());
        let context = AirContext::new(trace_info, degrees, 12, options);
        let context = context.set_num_transition_exemptions(1);
        RescueAir { context, pub_key: pub_inputs.pub_key }
    }

    fn context(&self) -> &AirContext<Self::BaseField> {
        &self.context
    }

    fn evaluate_transition<E: FieldElement + From<Self::BaseField>>(
        &self,
        frame: &EvaluationFrame<E>,
        periodic_values: &[E],
        result: &mut [E],
    ) {
        let current = frame.current();
        let next = frame.next();
        // expected state width is 12 field elements
        debug_assert_eq!(STATE_WIDTH, current.len());
        debug_assert_eq!(STATE_WIDTH, next.len());

        enforce_rpo_round(frame, result, periodic_values);
    }

    fn get_assertions(&self) -> Vec<Assertion<Self::BaseField>> {
        let initial_step = 0;
        let last_step = self.trace_length() - 1;
        vec![
            // Assert that the capacity as well as the second half of the rate portion of the state
            // are initialized to `ZERO`. The first half of the rate is unconstrained as it will
            // contain the secret key.
            Assertion::single(0, initial_step, Self::BaseField::ZERO),
            Assertion::single(1, initial_step, Self::BaseField::ZERO),
            Assertion::single(2, initial_step, Self::BaseField::ZERO),
            Assertion::single(3, initial_step, Self::BaseField::ZERO),
            Assertion::single(8, initial_step, Self::BaseField::ZERO),
            Assertion::single(9, initial_step, Self::BaseField::ZERO),
            Assertion::single(10, initial_step, Self::BaseField::ZERO),
            Assertion::single(11, initial_step, Self::BaseField::ZERO),
            // Assert that the public key is the correct one.
            Assertion::single(4, last_step, self.pub_key[0]),
            Assertion::single(5, last_step, self.pub_key[1]),
            Assertion::single(6, last_step, self.pub_key[2]),
            Assertion::single(7, last_step, self.pub_key[3]),
        ]
    }

    fn get_periodic_column_values(&self) -> Vec<Vec<Self::BaseField>> {
        get_round_constants()
    }
}

pub struct PublicInputs {
    pub(crate) pub_key: Word,
    pub(crate) msg: Word,
}

impl PublicInputs {
    pub fn new(pub_key: Word, msg: Word) -> Self {
        Self { pub_key, msg }
    }
}

impl ToElements<BaseElement> for PublicInputs {
    fn to_elements(&self) -> Vec<BaseElement> {
        let mut res = self.pub_key.to_vec();
        res.extend_from_slice(self.msg.as_ref());
        res
    }
}

// HELPER EVALUATORS
// ------------------------------------------------------------------------------------------------

/// Enforces constraints for a single round of the Rescue Prime Optimized hash function.
pub fn enforce_rpo_round<E: FieldElement + From<BaseElement>>(
    frame: &EvaluationFrame<E>,
    result: &mut [E],
    ark: &[E],
) {
    // compute the state that should result from applying the first 5 operations of the RPO round
    // to the current hash state.
    let mut step1 = [E::ZERO; STATE_WIDTH];
    step1.copy_from_slice(frame.current());

    apply_mds(&mut step1);
    // add constants
    for i in 0..STATE_WIDTH {
        step1[i] += ark[i];
    }
    apply_sbox(&mut step1);
    apply_mds(&mut step1);
    // add constants
    for i in 0..STATE_WIDTH {
        step1[i] += ark[STATE_WIDTH + i];
    }

    // compute the state that should result from applying the inverse of the last operation of the
    // RPO round to the next step of the computation.
    let mut step2 = [E::ZERO; STATE_WIDTH];
    step2.copy_from_slice(frame.next());
    apply_sbox(&mut step2);

    // make sure that the results are equal.
    for i in 0..STATE_WIDTH {
        result[i] = step2[i] - step1[i];
    }
}
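
// Note (a sketch of the degree argument, not stated in the diff): the second half of an RPO
// round applies the inverse S-box x^{1/7}, whose algebraic degree is far too high to encode
// directly as a constraint. Evaluating its inverse, x^7, on the *next* row and equating the
// result with the mid-round state keeps every transition constraint at degree 7, matching
// the `TransitionConstraintDegree::new(7)` entries in `RescueAir::new`.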

#[inline(always)]
fn apply_sbox<E: FieldElement + From<BaseElement>>(state: &mut [E; STATE_WIDTH]) {
    state.iter_mut().for_each(|v| {
        // compute v^7 as v * v^2 * v^4
        let t2 = v.square();
        let t4 = t2.square();
        *v *= t2 * t4;
    });
}

#[inline(always)]
fn apply_mds<E: FieldElement + From<BaseElement>>(state: &mut [E; STATE_WIDTH]) {
    let mut result = [E::ZERO; STATE_WIDTH];
    result.iter_mut().zip(MDS).for_each(|(r, mds_row)| {
        state.iter().zip(mds_row).for_each(|(&s, m)| {
            *r += E::from(m) * s;
        });
    });
    *state = result;
}

/// Returns RPO round constants arranged in column-major form.
pub fn get_round_constants() -> Vec<Vec<BaseElement>> {
    let mut constants = Vec::new();
    for _ in 0..(STATE_WIDTH * 2) {
        constants.push(vec![ZERO; HASH_CYCLE_LEN]);
    }

    #[allow(clippy::needless_range_loop)]
    for i in 0..HASH_CYCLE_LEN - 1 {
        for j in 0..STATE_WIDTH {
            constants[j][i] = ARK1[i][j];
            constants[j + STATE_WIDTH][i] = ARK2[i][j];
        }
    }

    constants
}
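
// Shape sanity sketch (illustrative, not part of the diff): `constants[j][i]` holds
// `ARK1[i][j]`, i.e. one periodic column per state element and half-round.
//
// let constants = get_round_constants();
// assert_eq!(constants.len(), 2 * STATE_WIDTH);   // 24 periodic columns
// assert_eq!(constants[0].len(), HASH_CYCLE_LEN); // one value per step of the 8-step cycle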
98 src/dsa/rpo_stark/stark/mod.rs Normal file
@@ -0,0 +1,98 @@
use alloc::vec::Vec;
use core::marker::PhantomData;

use prover::RpoSignatureProver;
use rand_chacha::ChaCha20Rng;
use winter_crypto::{ElementHasher, SaltedMerkleTree};
use winter_math::fields::f64::BaseElement;
use winter_prover::{Proof, ProofOptions, Prover};
use winter_utils::Serializable;
use winter_verifier::{verify, AcceptableOptions, VerifierError};

use crate::{
    hash::{rpo::Rpo256, DIGEST_SIZE},
    rand::RpoRandomCoin,
};

mod air;
pub use air::{PublicInputs, RescueAir};
mod prover;

/// Represents an abstract STARK-based signature scheme with knowledge of an RPO pre-image as
/// the hard relation.
pub struct RpoSignatureScheme<H: ElementHasher> {
    options: ProofOptions,
    _h: PhantomData<H>,
}

impl<H: ElementHasher<BaseField = BaseElement> + Sync> RpoSignatureScheme<H> {
    pub fn new(options: ProofOptions) -> Self {
        RpoSignatureScheme { options, _h: PhantomData }
    }

    pub fn sign(&self, sk: [BaseElement; DIGEST_SIZE], msg: [BaseElement; DIGEST_SIZE]) -> Proof {
        // create a prover
        let prover = RpoSignatureProver::<H>::new(msg, self.options.clone());

        // generate execution trace
        let trace = prover.build_trace(sk);

        // generate the initial seed for the PRNG used for zero-knowledge
        let seed: [u8; 32] = generate_seed(sk, msg);

        // generate the proof
        prover.prove(trace, Some(seed)).expect("failed to generate the signature")
    }

    pub fn verify(
        &self,
        pub_key: [BaseElement; DIGEST_SIZE],
        msg: [BaseElement; DIGEST_SIZE],
        proof: Proof,
    ) -> Result<(), VerifierError> {
        // we make sure that the parameters used in generating the proof match the expected ones
        if *proof.options() != self.options {
            return Err(VerifierError::UnacceptableProofOptions);
        }
        let pub_inputs = PublicInputs { pub_key, msg };
        let acceptable_options = AcceptableOptions::OptionSet(vec![proof.options().clone()]);
        verify::<RescueAir, Rpo256, RpoRandomCoin, SaltedMerkleTree<Rpo256, ChaCha20Rng>>(
            proof,
            pub_inputs,
            &acceptable_options,
        )
    }
}

/// Deterministically generates a seed for seeding the PRNG used for zero-knowledge.
///
/// This uses the argument described in [RFC 6979](https://datatracker.ietf.org/doc/html/rfc6979#section-3.5)
/// § 3.5 where the concatenation of the private key and the hashed message, i.e., sk || H(m), is
/// used in order to construct the initial seed of a PRNG.
///
/// Note that we also hash in a context string in order to domain separate between different
/// instantiations of the signature scheme.
#[inline]
pub fn generate_seed(sk: [BaseElement; DIGEST_SIZE], msg: [BaseElement; DIGEST_SIZE]) -> [u8; 32] {
    let context_bytes = "
Seed for PRNG used for Zero-knowledge in RPO-STARK signature scheme:
1. Version: Conjectured security
2. FRI queries: 30
3. Blowup factor: 8
4. Grinding bits: 12
5. Field extension degree: 2
6. FRI folding factor: 4
7. FRI remainder polynomial max degree: 7
"
    .to_bytes();
    let sk_bytes = sk.to_bytes();
    let msg_bytes = msg.to_bytes();

    let total_length = context_bytes.len() + sk_bytes.len() + msg_bytes.len();
    let mut buffer = Vec::with_capacity(total_length);
    buffer.extend_from_slice(&context_bytes);
    buffer.extend_from_slice(&sk_bytes);
    buffer.extend_from_slice(&msg_bytes);

    blake3::hash(&buffer).into()
}
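
// Usage sketch (illustrative, not part of the diff): the 32-byte output is suitable as a
// `SeedableRng` seed, e.g. for the `ChaCha20Rng` used as `ZkPrng` by the prover below.
//
// use rand::SeedableRng;
// let seed = generate_seed(sk, msg);
// let zk_prng = ChaCha20Rng::from_seed(seed);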
148 src/dsa/rpo_stark/stark/prover.rs Normal file
@@ -0,0 +1,148 @@
use core::marker::PhantomData;

use rand_chacha::ChaCha20Rng;
use winter_air::{
    AuxRandElements, ConstraintCompositionCoefficients, PartitionOptions, ZkParameters,
};
use winter_crypto::{ElementHasher, SaltedMerkleTree};
use winter_math::{fields::f64::BaseElement, FieldElement};
use winter_prover::{
    matrix::ColMatrix, CompositionPoly, CompositionPolyTrace, DefaultConstraintCommitment,
    DefaultConstraintEvaluator, DefaultTraceLde, ProofOptions, Prover, StarkDomain, Trace,
    TraceInfo, TracePolyTable, TraceTable,
};

use super::air::{PublicInputs, RescueAir, HASH_CYCLE_LEN};
use crate::{
    hash::{rpo::Rpo256, STATE_WIDTH},
    rand::RpoRandomCoin,
    Word, ZERO,
};

// PROVER
// ================================================================================================

/// A prover for the RPO STARK-based signature scheme.
///
/// The signature is based on the one-wayness of the RPO hash function, but it is generic over
/// the hash function used for instantiating the random oracle for the BCS transform.
pub(crate) struct RpoSignatureProver<H: ElementHasher + Sync> {
    message: Word,
    options: ProofOptions,
    _hasher: PhantomData<H>,
}

impl<H: ElementHasher + Sync> RpoSignatureProver<H> {
    pub(crate) fn new(message: Word, options: ProofOptions) -> Self {
        Self { message, options, _hasher: PhantomData }
    }

    pub(crate) fn build_trace(&self, sk: Word) -> TraceTable<BaseElement> {
        let mut trace = TraceTable::new(STATE_WIDTH, HASH_CYCLE_LEN);

        trace.fill(
            |state| {
                // initialize the first half of the rate portion of the state with the secret key;
                // the capacity (0..4) and the second half of the rate (8..12) start at `ZERO`
                state[0] = ZERO;
                state[1] = ZERO;
                state[2] = ZERO;
                state[3] = ZERO;
                state[4] = sk[0];
                state[5] = sk[1];
                state[6] = sk[2];
                state[7] = sk[3];
                state[8] = ZERO;
                state[9] = ZERO;
                state[10] = ZERO;
                state[11] = ZERO;
            },
            |step, state| {
                Rpo256::apply_round(
                    state.try_into().expect("should not fail given the size of the array"),
                    step,
                );
            },
        );
        trace
    }
}

impl<H: ElementHasher> Prover for RpoSignatureProver<H>
where
    H: ElementHasher<BaseField = BaseElement> + Sync,
{
    type BaseField = BaseElement;
    type Air = RescueAir;
    type Trace = TraceTable<BaseElement>;
    type HashFn = Rpo256;
    type VC = SaltedMerkleTree<Self::HashFn, Self::ZkPrng>;
    type RandomCoin = RpoRandomCoin;
    type TraceLde<E: FieldElement<BaseField = Self::BaseField>> =
        DefaultTraceLde<E, Self::HashFn, Self::VC>;
    type ConstraintCommitment<E: FieldElement<BaseField = Self::BaseField>> =
        DefaultConstraintCommitment<E, Self::HashFn, Self::ZkPrng, Self::VC>;
    type ConstraintEvaluator<'a, E: FieldElement<BaseField = Self::BaseField>> =
        DefaultConstraintEvaluator<'a, Self::Air, E>;
    type ZkPrng = ChaCha20Rng;

    fn get_pub_inputs(&self, trace: &Self::Trace) -> PublicInputs {
        let last_step = trace.length() - 1;
        // Note that the message is not part of the execution trace but is part of the public
        // inputs. This is explained in the reference description of the DSA and intuitively
        // it is done in order to make sure that the message is part of the Fiat-Shamir
        // transcript and hence binds the proof/signature to the message.
        PublicInputs {
            pub_key: [
                trace.get(4, last_step),
                trace.get(5, last_step),
                trace.get(6, last_step),
                trace.get(7, last_step),
            ],
            msg: self.message,
        }
    }

    fn options(&self) -> &ProofOptions {
        &self.options
    }

    fn new_trace_lde<E: FieldElement<BaseField = Self::BaseField>>(
        &self,
        trace_info: &TraceInfo,
        main_trace: &ColMatrix<Self::BaseField>,
        domain: &StarkDomain<Self::BaseField>,
        partition_option: PartitionOptions,
        zk_parameters: Option<ZkParameters>,
        prng: &mut Option<Self::ZkPrng>,
    ) -> (Self::TraceLde<E>, TracePolyTable<E>) {
        DefaultTraceLde::new(trace_info, main_trace, domain, partition_option, zk_parameters, prng)
    }

    fn new_evaluator<'a, E: FieldElement<BaseField = Self::BaseField>>(
        &self,
        air: &'a Self::Air,
        aux_rand_elements: Option<AuxRandElements<E>>,
        composition_coefficients: ConstraintCompositionCoefficients<E>,
    ) -> Self::ConstraintEvaluator<'a, E> {
        DefaultConstraintEvaluator::new(air, aux_rand_elements, composition_coefficients)
    }

    fn build_constraint_commitment<E: FieldElement<BaseField = Self::BaseField>>(
        &self,
        composition_poly_trace: CompositionPolyTrace<E>,
        num_constraint_composition_columns: usize,
        domain: &StarkDomain<Self::BaseField>,
        partition_options: PartitionOptions,
        zk_parameters: Option<ZkParameters>,
        prng: &mut Option<Self::ZkPrng>,
    ) -> (Self::ConstraintCommitment<E>, CompositionPoly<E>) {
        DefaultConstraintCommitment::new(
            composition_poly_trace,
            num_constraint_composition_columns,
            domain,
            partition_options,
            zk_parameters,
            prng,
        )
    }
}

@@ -1,8 +1,8 @@
use alloc::{string::String, vec::Vec};
use core::{
    mem::{size_of, transmute, transmute_copy},
    ops::Deref,
    slice::{self, from_raw_parts},
};

use super::{Digest, ElementHasher, Felt, FieldElement, Hasher};

@@ -33,6 +33,14 @@ const DIGEST20_BYTES: usize = 20;
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "&str"))]
pub struct Blake3Digest<const N: usize>([u8; N]);

impl<const N: usize> Blake3Digest<N> {
    pub fn digests_as_bytes(digests: &[Blake3Digest<N>]) -> &[u8] {
        let p = digests.as_ptr();
        let len = digests.len() * N;
        unsafe { slice::from_raw_parts(p as *const u8, len) }
    }
}
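
// Note (an observation, not part of the diff): the cast above relies on `Blake3Digest<N>`
// having the same memory layout as `[u8; N]` (a newtype with no extra fields), so a slice
// of digests can be reinterpreted as one contiguous byte slice of length `digests.len() * N`.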

impl<const N: usize> Default for Blake3Digest<N> {
    fn default() -> Self {
        Self([0; N])

@@ -114,6 +122,10 @@ impl Hasher for Blake3_256 {
        Self::hash(prepare_merge(values))
    }

    fn merge_many(values: &[Self::Digest]) -> Self::Digest {
        Blake3Digest(blake3::hash(Blake3Digest::digests_as_bytes(values)).into())
    }

    fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&seed.0);

@@ -174,6 +186,11 @@ impl Hasher for Blake3_192 {
        Blake3Digest(*shrink_bytes(&blake3::hash(bytes).into()))
    }

    fn merge_many(values: &[Self::Digest]) -> Self::Digest {
        let bytes: Vec<u8> = values.iter().flat_map(|v| v.as_bytes()).collect();
        Blake3Digest(*shrink_bytes(&blake3::hash(&bytes).into()))
    }

    fn merge(values: &[Self::Digest; 2]) -> Self::Digest {
        Self::hash(prepare_merge(values))
    }

@@ -242,6 +259,11 @@ impl Hasher for Blake3_160 {
        Self::hash(prepare_merge(values))
    }

    fn merge_many(values: &[Self::Digest]) -> Self::Digest {
        let bytes: Vec<u8> = values.iter().flat_map(|v| v.as_bytes()).collect();
        Blake3Digest(*shrink_bytes(&blake3::hash(&bytes).into()))
    }

    fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&seed.0);

@@ -1,8 +1,9 @@
use alloc::vec::Vec;

use proptest::prelude::*;
use rand_utils::rand_vector;

use super::*;

#[test]
fn blake3_hash_elements() {

@@ -1,10 +1,11 @@
//! Cryptographic hash functions used by the Miden VM and the Miden rollup.

use super::{CubeExtension, Felt, FieldElement, StarkField, ZERO};

pub mod blake;

mod rescue;
pub(crate) use rescue::{ARK1, ARK2, DIGEST_SIZE, MDS, STATE_WIDTH};
pub mod rpo {
    pub use super::rescue::{Rpo256, RpoDigest, RpoDigestError};
}

@@ -4,40 +4,43 @@ use core::arch::x86_64::*;
// https://github.com/0xPolygonZero/plonky2/blob/main/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs

// Preliminary notes:
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily emulated.
//    The method recognizes that a + b overflowed iff (a + b) < a:
//    1. res_lo = a_lo + b_lo
//    2. carry_mask = res_lo < a_lo
//    3. res_hi = a_hi + b_hi - carry_mask
//
// Notice that carry_mask is subtracted, not added. This is because AVX comparison instructions
// return -1 (all bits 1) for true and 0 for false.
//
// 2. AVX does not have unsigned 64-bit comparisons. Those can be emulated with signed comparisons
//    by recognizing that a <u b iff a + (1 << 63) <s b + (1 << 63), where the addition wraps around
//    and the comparisons are unsigned and signed respectively. The shift function adds/subtracts
//    1 << 63 to enable this trick. Addition with carry example:
//    1. a_lo_s = shift(a_lo)
//    2. res_lo_s = a_lo_s + b_lo
//    3. carry_mask = res_lo_s <s a_lo_s
//    4. res_lo = shift(res_lo_s)
//    5. res_hi = a_hi + b_hi - carry_mask
//
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition
// is shifted if exactly one of the operands is shifted, as is the case on
// line 2. Line 3. performs a signed comparison res_lo_s <s a_lo_s on shifted values to
// emulate unsigned comparison res_lo <u a_lo on unshifted values. Finally, line 4. reverses the
// shift so the result can be returned.
//
// When performing a chain of calculations, we can often save instructions by letting
// the shift propagate through and only undoing it when necessary.
// For example, to compute the addition of three two-word (128-bit) numbers we can do:
// 1. a_lo_s = shift(a_lo)
// 2. tmp_lo_s = a_lo_s + b_lo
// 3. tmp_carry_mask = tmp_lo_s <s a_lo_s
// 4. tmp_hi = a_hi + b_hi - tmp_carry_mask
// 5. res_lo_s = tmp_lo_s + c_lo
// 6. res_carry_mask = res_lo_s <s tmp_lo_s
// 7. res_lo = shift(res_lo_s)
// 8. res_hi = tmp_hi + c_hi - res_carry_mask
//
// Notice that the above 3-value addition still only requires two calls to shift, just like our
// 2-value addition.
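//
// A minimal sketch of the carry trick above (illustrative, not part of the diff); the
// intrinsic names are standard AVX2, and the shift is implemented as an XOR with 1 << 63:
//
// #[target_feature(enable = "avx2")]
// unsafe fn add_128(a_lo: __m256i, a_hi: __m256i, b_lo: __m256i, b_hi: __m256i) -> (__m256i, __m256i) {
//     let sign = _mm256_set1_epi64x(i64::MIN);               // 1 << 63
//     let a_lo_s = _mm256_xor_si256(a_lo, sign);             // shift(a_lo)
//     let res_lo_s = _mm256_add_epi64(a_lo_s, b_lo);         // a_lo_s + b_lo
//     let carry_mask = _mm256_cmpgt_epi64(a_lo_s, res_lo_s); // res_lo_s <s a_lo_s; -1 on carry
//     let res_lo = _mm256_xor_si256(res_lo_s, sign);         // undo the shift
//     // subtracting the -1 mask adds the carry into the high words
//     let res_hi = _mm256_sub_epi64(_mm256_add_epi64(a_hi, b_hi), carry_mask);
//     (res_lo, res_hi)
// }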

@@ -60,10 +63,10 @@ pub fn branch_hint() {
}

macro_rules! map3 {
    ($f:ident:: < $l:literal > , $v:ident) => {
        ($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2))
    };
    ($f:ident:: < $l:literal > , $v1:ident, $v2:ident) => {
        ($f::<$l>($v1.0, $v2.0), $f::<$l>($v1.1, $v2.1), $f::<$l>($v1.2, $v2.2))
    };
    ($f:ident, $v:ident) => {
@@ -72,11 +75,11 @@ macro_rules! map3 {
    ($f:ident, $v0:ident, $v1:ident) => {
        ($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2))
    };
    ($f:ident,rep $v0:ident, $v1:ident) => {
        ($f($v0, $v1.0), $f($v0, $v1.1), $f($v0, $v1.2))
    };

    ($f:ident, $v0:ident,rep $v1:ident) => {
        ($f($v0.0, $v1), $f($v0.1, $v1), $f($v0.2, $v1))
    };
}
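
// Expansion sketch (illustrative, not part of the diff): with `let v = (x0, x1, x2);` and
// `let w = (y0, y1, y2);`, `map3!(_mm256_add_epi64, v, w)` expands to
// `(_mm256_add_epi64(v.0, w.0), _mm256_add_epi64(v.1, w.1), _mm256_add_epi64(v.2, w.2))`,
// i.e. the same operation applied element-wise across a triple of vectors.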

@@ -1,26 +1,28 @@
// FFT-BASED MDS MULTIPLICATION HELPER FUNCTIONS
// ================================================================================================

//! This module contains helper functions as well as constants used to perform the vector-matrix
//! multiplication step of the Rescue prime permutation. The special form of our MDS matrix,
//! i.e. being circulant, allows us to reduce the vector-matrix multiplication to a Hadamard product
//! of two vectors in "frequency domain". This follows from the simple fact that every circulant
//! matrix has the columns of the discrete Fourier transform matrix as orthogonal eigenvectors.
//! The implementation also avoids the use of 3-point FFTs, and 3-point iFFTs, and substitutes that
//! with explicit expressions. It also avoids, due to the form of our matrix in the frequency
//! domain, divisions by 2 and repeated modular reductions. This is because of our explicit choice
//! of an MDS matrix that has small powers of 2 entries in frequency domain.
//! The following implementation has benefited greatly from the discussions and insights of
//! Hamish Ivey-Law and Jacqueline Nabaglo of Polygon Zero and is based on Nabaglo's Plonky2
//! implementation.

// Rescue MDS matrix in frequency domain.
//
// More precisely, this is the output of the three 4-point (real) FFTs of the first column of
// the MDS matrix i.e. just before the multiplication with the appropriate twiddle factors
// and application of the final four 3-point FFT in order to get the full 12-point FFT.
// The entries have been scaled appropriately in order to avoid divisions by 2 in iFFT2 and iFFT4.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to
// generate MDS matrices efficiently in original domain, that was developed by the Polygon Zero
// team.
const MDS_FREQ_BLOCK_ONE: [i64; 3] = [16, 8, 16];
const MDS_FREQ_BLOCK_TWO: [(i64, i64); 3] = [(-1, 2), (-1, 1), (4, 8)];
const MDS_FREQ_BLOCK_THREE: [i64; 3] = [-8, 1, 1];
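
// Reference sketch (illustrative, not part of the diff): multiplying by a circulant matrix
// with first column `col` is a cyclic convolution, which is exactly what the frequency-domain
// Hadamard product computes. A hypothetical, unoptimized check:
//
// fn circulant_mul(col: &[i64; 12], v: &[i64; 12]) -> [i64; 12] {
//     let mut out = [0i64; 12];
//     for i in 0..12 {
//         for j in 0..12 {
//             out[i] += col[(12 + i - j) % 12] * v[j]; // M[i][j] = col[(i - j) mod 12]
//         }
//     }
//     out
// }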

@@ -1,14 +1,12 @@
use core::ops::Range;

use super::{CubeExtension, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ZERO};

mod arch;
pub use arch::optimized::{add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox};

mod mds;
pub(crate) use mds::{apply_mds, MDS};

mod rpo;
pub use rpo::{Rpo256, RpoDigest, RpoDigestError};

@@ -28,7 +26,7 @@ const NUM_ROUNDS: usize = 7;

/// Sponge state is set to 12 field elements or 96 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub(crate) const STATE_WIDTH: usize = 12;

/// The rate portion of the state is located in elements 4 through 11.
const RATE_RANGE: Range<usize> = 4..12;

@@ -44,8 +42,8 @@ const CAPACITY_RANGE: Range<usize> = 0..4;
///
/// The digest is returned from state elements 4, 5, 6, and 7 (the first four elements of the
/// rate portion).
pub(crate) const DIGEST_RANGE: Range<usize> = 4..8;
pub(crate) const DIGEST_SIZE: usize = DIGEST_RANGE.end - DIGEST_RANGE.start;

/// The number of bytes needed to encode a digest.
const DIGEST_BYTES: usize = 32;

@@ -146,7 +144,7 @@ fn add_constants(state: &mut [Felt; STATE_WIDTH], ark: &[Felt; STATE_WIDTH]) {
///
/// The constants are broken up into two arrays ARK1 and ARK2; ARK1 contains the constants for the
/// first half of the RPO round, and ARK2 contains constants for the second half of the RPO round.
pub(crate) const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
    [
        Felt::new(5789762306288267392),
        Felt::new(6522564764413701783),

@@ -247,7 +245,7 @@ const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
    ],
];

pub(crate) const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
    [
        Felt::new(6077062762357204287),
        Felt::new(15277620170502011191),

@@ -1,5 +1,11 @@
use alloc::string::String;
use core::{cmp::Ordering, fmt::Display, ops::Deref, slice};

use rand::{
    distributions::{Standard, Uniform},
    prelude::Distribution,
};
use thiserror::Error;

use super::{Digest, Felt, StarkField, DIGEST_BYTES, DIGEST_SIZE, ZERO};
use crate::{
@@ -19,6 +25,9 @@ use crate::{
pub struct RpoDigest([Felt; DIGEST_SIZE]);

impl RpoDigest {
    /// The serialized size of the digest in bytes.
    pub const SERIALIZED_SIZE: usize = DIGEST_BYTES;

    pub const fn new(value: [Felt; DIGEST_SIZE]) -> Self {
        Self(value)
    }
@@ -31,13 +40,19 @@ impl RpoDigest {
        <Self as Digest>::as_bytes(self)
    }

    pub fn digests_as_elements_iter<'a, I>(digests: I) -> impl Iterator<Item = &'a Felt>
    where
        I: Iterator<Item = &'a Self>,
    {
        digests.flat_map(|d| d.0.iter())
    }

    pub fn digests_as_elements(digests: &[Self]) -> &[Felt] {
        let p = digests.as_ptr();
        let len = digests.len() * DIGEST_SIZE;
        unsafe { slice::from_raw_parts(p as *const Felt, len) }
    }
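
    // As with `Blake3Digest::digests_as_bytes` earlier in this diff, the unsafe cast relies
    // on `RpoDigest` having the same memory layout as `[Felt; DIGEST_SIZE]` (an observation,
    // not part of the diff).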

    /// Returns hexadecimal representation of this digest prefixed with `0x`.
    pub fn to_hex(&self) -> String {
        bytes_to_hex_string(self.as_bytes())

@@ -115,12 +130,27 @@ impl Randomizable for RpoDigest {
    }
}

impl Distribution<RpoDigest> for Standard {
    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> RpoDigest {
        let mut res = [ZERO; DIGEST_SIZE];
        let uni_dist = Uniform::from(0..Felt::MODULUS);
        for r in res.iter_mut() {
            let sampled_integer = uni_dist.sample(rng);
            *r = Felt::new(sampled_integer);
        }
        RpoDigest::new(res)
    }
}

// CONVERSIONS: FROM RPO DIGEST
// ================================================================================================

#[derive(Debug, Error)]
pub enum RpoDigestError {
    #[error("failed to convert digest field element to {0}")]
    TypeConversion(&'static str),
    #[error("failed to convert to field element: {0}")]
    InvalidFieldElement(String),
}

impl TryFrom<&RpoDigest> for [bool; DIGEST_SIZE] {

@@ -144,10 +174,10 @@ impl TryFrom<RpoDigest> for [bool; DIGEST_SIZE] {
        }

        Ok([
            to_bool(value.0[0].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
            to_bool(value.0[1].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
            to_bool(value.0[2].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
            to_bool(value.0[3].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
        ])
    }
}

@@ -165,10 +195,22 @@ impl TryFrom<RpoDigest> for [u8; DIGEST_SIZE] {

    fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u8"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u8"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u8"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u8"))?,
        ])
    }
}

@@ -186,10 +228,22 @@ impl TryFrom<RpoDigest> for [u16; DIGEST_SIZE] {

    fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u16"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u16"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u16"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u16"))?,
        ])
    }
}

@@ -207,10 +261,22 @@ impl TryFrom<RpoDigest> for [u32; DIGEST_SIZE] {

    fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u32"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u32"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u32"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpoDigestError::TypeConversion("u32"))?,
        ])
    }
}

@@ -334,10 +400,10 @@ impl TryFrom<[u64; DIGEST_SIZE]> for RpoDigest {

    fn try_from(value: [u64; DIGEST_SIZE]) -> Result<Self, RpoDigestError> {
        Ok(Self([
            value[0].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
            value[1].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
            value[2].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
            value[3].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
        ]))
    }
}

@@ -423,6 +489,10 @@ impl Serializable for RpoDigest {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        target.write_bytes(&self.as_bytes());
    }

    fn get_size_hint(&self) -> usize {
        Self::SERIALIZED_SIZE
    }
}

impl Deserializable for RpoDigest {

@@ -459,6 +529,7 @@ impl IntoIterator for RpoDigest {
#[cfg(test)]
mod tests {
    use alloc::string::String;

    use rand_utils::rand_value;

    use super::{Deserializable, Felt, RpoDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};

@@ -476,6 +547,7 @@ mod tests {
    let mut bytes = vec![];
    d1.write_into(&mut bytes);
    assert_eq!(DIGEST_BYTES, bytes.len());
    assert_eq!(bytes.len(), d1.get_size_hint());

    let mut reader = SliceReader::new(&bytes);
    let d2 = RpoDigest::read_from(&mut reader).unwrap();

@@ -4,7 +4,7 @@ use super::{
    add_constants, add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox, apply_inv_sbox,
    apply_mds, apply_sbox, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ARK1,
    ARK2, BINARY_CHUNK_SIZE, CAPACITY_RANGE, DIGEST_BYTES, DIGEST_RANGE, DIGEST_SIZE, INPUT1_RANGE,
    INPUT2_RANGE, MDS, NUM_ROUNDS, RATE_RANGE, RATE_WIDTH, STATE_WIDTH, ZERO,
};

mod digest;

@@ -19,7 +19,8 @@ mod tests;
/// Implementation of the Rescue Prime Optimized hash function with 256-bit output.
///
/// The hash function is implemented according to the Rescue Prime Optimized
/// [specifications](https://eprint.iacr.org/2022/1577) while the padding rule follows the one
/// described [here](https://eprint.iacr.org/2023/1045).
///
/// The parameters used to instantiate the function are:
/// * Field: 64-bit prime field with modulus p = 2^64 - 2^32 + 1.

@@ -51,7 +52,7 @@ mod tests;
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using the
/// [hash_elements()](Rpo256::hash_elements) function rather than hashing the serialized bytes
/// using the [hash()](Rpo256::hash) function.
///
/// ## Domain separation

@@ -64,6 +65,10 @@ mod tests;
/// becomes the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPO permutation when hashing empty input.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpo256();

@@ -77,14 +82,16 @@ impl Hasher for Rpo256 {
        // initialize the state with zeroes
        let mut state = [ZERO; STATE_WIDTH];

        // determine the number of field elements needed to encode `bytes` when each field element
        // represents at most 7 bytes.
        let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE);

        // set the first capacity element to `RATE_WIDTH + (num_field_elem % RATE_WIDTH)`. We do
        // this to achieve:
        // 1. Domain separating hashing of `[u8]` from hashing of `[Felt]`.
        // 2. Avoiding collisions at the `[Felt]` representation of the encoded bytes.
        state[CAPACITY_RANGE.start] =
            Felt::from((RATE_WIDTH + (num_field_elem % RATE_WIDTH)) as u8);

        // initialize a buffer to receive the little-endian elements.
        let mut buf = [0_u8; 8];

@@ -93,41 +100,49 @@ impl Hasher for Rpo256 {
        // into the state.
        //
        // every time the rate range is filled, a permutation is performed. if the final value of
        // `rate_pos` is not zero, then the chunks count wasn't enough to fill the state range,
        // and an additional permutation must be performed.
        let mut current_chunk_idx = 0_usize;
        // handle the case of an empty `bytes`
        let last_chunk_idx = if num_field_elem == 0 {
            current_chunk_idx
        } else {
            num_field_elem - 1
        };
        let rate_pos = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |rate_pos, chunk| {
            // copy the chunk into the buffer
            if current_chunk_idx != last_chunk_idx {
                buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
            } else {
                // on the last iteration, we pad `buf` with a 1 followed by as many 0's as are
                // needed to fill it
                buf.fill(0);
                buf[..chunk.len()].copy_from_slice(chunk);
                buf[chunk.len()] = 1;
            }
            current_chunk_idx += 1;

            // set the current rate element to the input. since we take at most 7 bytes, we are
            // guaranteed that the inputs data will fit into a single field element.
            state[RATE_RANGE.start + rate_pos] = Felt::new(u64::from_le_bytes(buf));

            // proceed filling the range. if it's full, then we apply a permutation and reset the
            // counter to the beginning of the range.
            if rate_pos == RATE_WIDTH - 1 {
                Self::apply_permutation(&mut state);
                0
            } else {
                rate_pos + 1
            }
        });

        // if we absorbed some elements but didn't apply a permutation to them (would happen when
        // the number of elements is not a multiple of RATE_WIDTH), apply the RPO permutation. we
        // don't need to apply any extra padding because the first capacity element contains a
        // flag indicating the number of field elements constituting the last block when the latter
        // is not divisible by `RATE_WIDTH`.
        if rate_pos != 0 {
            state[RATE_RANGE.start + rate_pos..RATE_RANGE.end].fill(ZERO);
            Self::apply_permutation(&mut state);
        }

@@ -139,7 +154,7 @@ impl Hasher for Rpo256 {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = Self::Digest::digests_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }

@@ -149,29 +164,28 @@ impl Hasher for Rpo256 {
        RpoDigest::new(state[DIGEST_RANGE].try_into().unwrap())
    }

    fn merge_many(values: &[Self::Digest]) -> Self::Digest {
        Self::hash_elements(Self::Digest::digests_as_elements(values))
    }

    fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
        // initialize the state as follows:
        // - seed is copied into the first 4 elements of the rate portion of the state.
        // - if the value fits into a single field element, copy it into the fifth rate element and
        //   set the first capacity element to 5.
        // - if the value doesn't fit into a single field element, split it into two field elements,
        //   copy them into rate elements 5 and 6 and set the first capacity element to 6.
        let mut state = [ZERO; STATE_WIDTH];
        state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
        state[INPUT2_RANGE.start] = Felt::new(value);
        if value < Felt::MODULUS {
            state[CAPACITY_RANGE.start] = Felt::from(5_u8);
        } else {
            state[INPUT2_RANGE.start + 1] = Felt::new(value / Felt::MODULUS);
            state[CAPACITY_RANGE.start] = Felt::from(6_u8);
        }

        // apply the RPO permutation and return the first four elements of the rate
        Self::apply_permutation(&mut state);
        RpoDigest::new(state[DIGEST_RANGE].try_into().unwrap())
    }
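
    // Note (an observation, not part of the diff): the capacity values 5 and 6 match the
    // `hash_elements` convention below: they equal the number of rate elements actually
    // absorbed (4 seed elements plus one or two value elements) modulo RATE_WIDTH.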

@@ -185,11 +199,9 @@ impl ElementHasher for Rpo256 {
        let elements = E::slice_as_base_elements(elements);

        // initialize state to all zeros, except for the first element of the capacity part, which
        // is set to `elements.len() % RATE_WIDTH`.
        let mut state = [ZERO; STATE_WIDTH];
        state[CAPACITY_RANGE.start] = Self::BaseField::from((elements.len() % RATE_WIDTH) as u8);

        // absorb elements into the state one by one until the rate portion of the state is filled
        // up; then apply the Rescue permutation and start absorbing again; repeat until all

@@ -206,11 +218,8 @@ impl ElementHasher for Rpo256 {

        // if we absorbed some elements but didn't apply a permutation to them (would happen when
        // the number of elements is not a multiple of RATE_WIDTH), apply the RPO permutation after
        // padding by as many 0 as necessary to make the input length a multiple of the RATE_WIDTH.
        if i > 0 {
            while i != RATE_WIDTH {
                state[RATE_RANGE.start + i] = ZERO;
                i += 1;

@@ -285,7 +294,7 @@ impl Rpo256 {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = RpoDigest::digests_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }

@@ -1,12 +1,16 @@
use alloc::{collections::BTreeSet, vec::Vec};

use proptest::prelude::*;
use rand_utils::rand_value;

use super::{
    super::{apply_inv_sbox, apply_sbox, ALPHA, INV_ALPHA},
    Felt, FieldElement, Hasher, Rpo256, RpoDigest, StarkField, STATE_WIDTH, ZERO,
};
use crate::{
    hash::rescue::{BINARY_CHUNK_SIZE, CAPACITY_RANGE, RATE_WIDTH},
    Word, ONE,
};

#[test]
fn test_sbox() {

@@ -58,7 +62,7 @@ fn merge_vs_merge_in_domain() {
    ];
    let merge_result = Rpo256::merge(&digests);

    // ------------- merge with domain = 0 -------------

    // set domain to ZERO. This should not change the result.
    let domain = ZERO;

@@ -66,7 +70,7 @@ fn merge_vs_merge_in_domain() {
    let merge_in_domain_result = Rpo256::merge_in_domain(&digests, domain);
    assert_eq!(merge_result, merge_in_domain_result);

    // ------------- merge with domain = 1 -------------

    // set domain to ONE. This should change the result.
    let domain = ONE;

@@ -125,6 +129,27 @@ fn hash_padding() {
    assert_ne!(r1, r2);
}

#[test]
fn hash_padding_no_extra_permutation_call() {
    use crate::hash::rescue::DIGEST_RANGE;

    // Implementation
    let num_bytes = BINARY_CHUNK_SIZE * RATE_WIDTH;
    let mut buffer = vec![0_u8; num_bytes];
    *buffer.last_mut().unwrap() = 97;
    let r1 = Rpo256::hash(&buffer);

    // Expected
    let final_chunk = [0_u8, 0, 0, 0, 0, 0, 97, 1];
    let mut state = [ZERO; STATE_WIDTH];
    // padding when hashing bytes
    state[CAPACITY_RANGE.start] = Felt::from(RATE_WIDTH as u8);
    *state.last_mut().unwrap() = Felt::new(u64::from_le_bytes(final_chunk));
    Rpo256::apply_permutation(&mut state);

    assert_eq!(&r1[0..4], &state[DIGEST_RANGE]);
}

#[test]
fn hash_elements_padding() {
    let e1 = [Felt::new(rand_value()); 2];

@@ -158,6 +183,24 @@ fn hash_elements() {
    assert_eq!(m_result, h_result);
}

#[test]
fn hash_empty() {
    let elements: Vec<Felt> = vec![];

    let zero_digest = RpoDigest::default();
    let h_result = Rpo256::hash_elements(&elements);
    assert_eq!(zero_digest, h_result);
}

#[test]
fn hash_empty_bytes() {
    let bytes: Vec<u8> = vec![];

    let zero_digest = RpoDigest::default();
    let h_result = Rpo256::hash(&bytes);
    assert_eq!(zero_digest, h_result);
}

#[test]
|
||||
fn hash_test_vectors() {
|
||||
let elements = [
|
||||
@@ -228,46 +271,46 @@ proptest! {
|
||||
|
||||
const EXPECTED: [Word; 19] = [
    [
        Felt::new(1502364727743950833),
        Felt::new(5880949717274681448),
        Felt::new(162790463902224431),
        Felt::new(6901340476773664264),
        Felt::new(18126731724905382595),
        Felt::new(7388557040857728717),
        Felt::new(14290750514634285295),
        Felt::new(7852282086160480146),
    ],
    [
        Felt::new(7478710183745780580),
        Felt::new(3308077307559720969),
        Felt::new(3383561985796182409),
        Felt::new(17205078494700259815),
        Felt::new(10139303045932500183),
        Felt::new(2293916558361785533),
        Felt::new(15496361415980502047),
        Felt::new(17904948502382283940),
    ],
    [
        Felt::new(17439912364295172999),
        Felt::new(17979156346142712171),
        Felt::new(8280795511427637894),
        Felt::new(9349844417834368814),
        Felt::new(17457546260239634015),
        Felt::new(803990662839494686),
        Felt::new(10386005777401424878),
        Felt::new(18168807883298448638),
    ],
    [
        Felt::new(5105868198472766874),
        Felt::new(13090564195691924742),
        Felt::new(1058904296915798891),
        Felt::new(18379501748825152268),
        Felt::new(13072499238647455740),
        Felt::new(10174350003422057273),
        Felt::new(9201651627651151113),
        Felt::new(6872461887313298746),
    ],
    [
        Felt::new(9133662113608941286),
        Felt::new(12096627591905525991),
        Felt::new(14963426595993304047),
        Felt::new(13290205840019973377),
        Felt::new(2903803350580990546),
        Felt::new(1838870750730563299),
        Felt::new(4258619137315479708),
        Felt::new(17334260395129062936),
    ],
    [
        Felt::new(3134262397541159485),
        Felt::new(10106105871979362399),
        Felt::new(138768814855329459),
        Felt::new(15044809212457404677),
        Felt::new(8571221005243425262),
        Felt::new(3016595589318175865),
        Felt::new(13933674291329928438),
        Felt::new(678640375034313072),
    ],
    [
        Felt::new(162696376578462826),
        Felt::new(4991300494838863586),
        Felt::new(660346084748120605),
        Felt::new(13179389528641752698),
        Felt::new(16314113978986502310),
        Felt::new(14587622368743051587),
        Felt::new(2808708361436818462),
        Felt::new(10660517522478329440),
    ],
    [
        Felt::new(2242391899857912644),
@@ -276,46 +319,46 @@ const EXPECTED: [Word; 19] = [
        Felt::new(5046143039268215739),
    ],
    [
        Felt::new(9585630502158073976),
        Felt::new(1310051013427303477),
        Felt::new(7491921222636097758),
        Felt::new(9417501558995216762),
        Felt::new(5218076004221736204),
        Felt::new(17169400568680971304),
        Felt::new(8840075572473868990),
        Felt::new(12382372614369863623),
    ],
    [
        Felt::new(1994394001720334744),
        Felt::new(10866209900885216467),
        Felt::new(13836092831163031683),
        Felt::new(10814636682252756697),
        Felt::new(9783834557155203486),
        Felt::new(12317263104955018849),
        Felt::new(3933748931816109604),
        Felt::new(1843043029836917214),
    ],
    [
        Felt::new(17486854790732826405),
        Felt::new(17376549265955727562),
        Felt::new(2371059831956435003),
        Felt::new(17585704935858006533),
        Felt::new(14498234468286984551),
        Felt::new(16837257669834682387),
        Felt::new(6664141123711355107),
        Felt::new(4590460158294697186),
    ],
    [
        Felt::new(11368277489137713825),
        Felt::new(3906270146963049287),
        Felt::new(10236262408213059745),
        Felt::new(78552867005814007),
        Felt::new(4661800562479916067),
        Felt::new(11794407552792839953),
        Felt::new(9037742258721863712),
        Felt::new(6287820818064278819),
    ],
    [
        Felt::new(17899847381280262181),
        Felt::new(14717912805498651446),
        Felt::new(10769146203951775298),
        Felt::new(2774289833490417856),
        Felt::new(7752693085194633729),
        Felt::new(7379857372245835536),
        Felt::new(9270229380648024178),
        Felt::new(10638301488452560378),
    ],
    [
        Felt::new(3794717687462954368),
        Felt::new(4386865643074822822),
        Felt::new(8854162840275334305),
        Felt::new(7129983987107225269),
        Felt::new(11542686762698783357),
        Felt::new(15570714990728449027),
        Felt::new(7518801014067819501),
        Felt::new(12706437751337583515),
    ],
    [
        Felt::new(7244773535611633983),
        Felt::new(19359923075859320),
        Felt::new(10898655967774994333),
        Felt::new(9319339563065736480),
        Felt::new(9553923701032839042),
        Felt::new(7281190920209838818),
        Felt::new(2488477917448393955),
        Felt::new(5088955350303368837),
    ],
    [
        Felt::new(4935426252518736883),
@@ -324,21 +367,21 @@ const EXPECTED: [Word; 19] = [
        Felt::new(18159875708229758073),
    ],
    [
        Felt::new(14871230873837295931),
        Felt::new(11225255908868362971),
        Felt::new(18100987641405432308),
        Felt::new(1559244340089644233),
        Felt::new(12795429638314178838),
        Felt::new(14360248269767567855),
        Felt::new(3819563852436765058),
        Felt::new(10859123583999067291),
    ],
    [
        Felt::new(8348203744950016968),
        Felt::new(4041411241960726733),
        Felt::new(17584743399305468057),
        Felt::new(16836952610803537051),
        Felt::new(2695742617679420093),
        Felt::new(9151515850666059759),
        Felt::new(15855828029180595485),
        Felt::new(17190029785471463210),
    ],
    [
        Felt::new(16139797453633030050),
        Felt::new(1090233424040889412),
        Felt::new(10770255347785669036),
        Felt::new(16982398877290254028),
        Felt::new(13205273108219124830),
        Felt::new(2524898486192849221),
        Felt::new(14618764355375283547),
        Felt::new(10615614265042186874),
    ],
];

@@ -1,5 +1,7 @@
use alloc::string::String;
use core::{cmp::Ordering, fmt::Display, ops::Deref};
use core::{cmp::Ordering, fmt::Display, ops::Deref, slice};

use thiserror::Error;

use super::{Digest, Felt, StarkField, DIGEST_BYTES, DIGEST_SIZE, ZERO};
use crate::{
@@ -19,6 +21,9 @@ use crate::{
pub struct RpxDigest([Felt; DIGEST_SIZE]);

impl RpxDigest {
    /// The serialized size of the digest in bytes.
    pub const SERIALIZED_SIZE: usize = DIGEST_BYTES;

    pub const fn new(value: [Felt; DIGEST_SIZE]) -> Self {
        Self(value)
    }
@@ -31,13 +36,19 @@ impl RpxDigest {
        <Self as Digest>::as_bytes(self)
    }

    pub fn digests_as_elements<'a, I>(digests: I) -> impl Iterator<Item = &'a Felt>
    pub fn digests_as_elements_iter<'a, I>(digests: I) -> impl Iterator<Item = &'a Felt>
    where
        I: Iterator<Item = &'a Self>,
    {
        digests.flat_map(|d| d.0.iter())
    }

    pub fn digests_as_elements(digests: &[Self]) -> &[Felt] {
        let p = digests.as_ptr();
        let len = digests.len() * DIGEST_SIZE;
        unsafe { slice::from_raw_parts(p as *const Felt, len) }
    }

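Editorial note on the zero-copy accessor added above: the `unsafe` pointer cast is only sound if `RpxDigest` has exactly the layout of its inner `[Felt; DIGEST_SIZE]` array. A minimal self-contained sketch of the same pattern; the `#[repr(transparent)]` attribute and all names here are ours for illustration, not taken from the diff:

```rust
use core::slice;

#[repr(transparent)] // guarantees the same layout as the inner array
struct Digest4([u64; 4]);

fn digests_as_flat(digests: &[Digest4]) -> &[u64] {
    let ptr = digests.as_ptr() as *const u64;
    // each digest contributes exactly 4 contiguous elements
    let len = digests.len() * 4;
    unsafe { slice::from_raw_parts(ptr, len) }
}

fn main() {
    let ds = [Digest4([1, 2, 3, 4]), Digest4([5, 6, 7, 8])];
    assert_eq!(digests_as_flat(&ds), &[1, 2, 3, 4, 5, 6, 7, 8]);
}
```

With that layout guarantee, a slice of digests is one contiguous run of field elements, which is what makes `from_raw_parts` valid here.
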
    /// Returns hexadecimal representation of this digest prefixed with `0x`.
    pub fn to_hex(&self) -> String {
        bytes_to_hex_string(self.as_bytes())
@@ -118,9 +129,12 @@ impl Randomizable for RpxDigest {
// CONVERSIONS: FROM RPX DIGEST
// ================================================================================================

#[derive(Copy, Clone, Debug)]
#[derive(Debug, Error)]
pub enum RpxDigestError {
    InvalidInteger,
    #[error("failed to convert digest field element to {0}")]
    TypeConversion(&'static str),
    #[error("failed to convert to field element: {0}")]
    InvalidFieldElement(String),
}

impl TryFrom<&RpxDigest> for [bool; DIGEST_SIZE] {
@@ -144,10 +158,10 @@ impl TryFrom<RpxDigest> for [bool; DIGEST_SIZE] {
        }

        Ok([
            to_bool(value.0[0].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
            to_bool(value.0[1].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
            to_bool(value.0[2].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
            to_bool(value.0[3].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
            to_bool(value.0[0].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
            to_bool(value.0[1].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
            to_bool(value.0[2].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
            to_bool(value.0[3].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
        ])
    }
}
@@ -165,10 +179,22 @@ impl TryFrom<RpxDigest> for [u8; DIGEST_SIZE] {

    fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u8"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u8"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u8"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u8"))?,
        ])
    }
}
@@ -186,10 +212,22 @@ impl TryFrom<RpxDigest> for [u16; DIGEST_SIZE] {

    fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u16"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u16"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u16"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u16"))?,
        ])
    }
}
@@ -207,10 +245,22 @@ impl TryFrom<RpxDigest> for [u32; DIGEST_SIZE] {

    fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
        Ok([
            value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value.0[0]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u32"))?,
            value.0[1]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u32"))?,
            value.0[2]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u32"))?,
            value.0[3]
                .as_int()
                .try_into()
                .map_err(|_| RpxDigestError::TypeConversion("u32"))?,
        ])
    }
}
@@ -334,10 +384,10 @@ impl TryFrom<[u64; DIGEST_SIZE]> for RpxDigest {

    fn try_from(value: [u64; DIGEST_SIZE]) -> Result<Self, RpxDigestError> {
        Ok(Self([
            value[0].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value[1].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value[2].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value[3].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
            value[0].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
            value[1].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
            value[2].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
            value[3].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
        ]))
    }
}
@@ -423,6 +473,10 @@ impl Serializable for RpxDigest {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        target.write_bytes(&self.as_bytes());
    }

    fn get_size_hint(&self) -> usize {
        Self::SERIALIZED_SIZE
    }
}

impl Deserializable for RpxDigest {
@@ -459,6 +513,7 @@ impl IntoIterator for RpxDigest {
#[cfg(test)]
mod tests {
    use alloc::string::String;

    use rand_utils::rand_value;

    use super::{Deserializable, Felt, RpxDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};
@@ -476,6 +531,7 @@ mod tests {
    let mut bytes = vec![];
    d1.write_into(&mut bytes);
    assert_eq!(DIGEST_BYTES, bytes.len());
    assert_eq!(bytes.len(), d1.get_size_hint());

    let mut reader = SliceReader::new(&bytes);
    let d2 = RpxDigest::read_from(&mut reader).unwrap();

@@ -11,6 +11,9 @@ use super::{
mod digest;
pub use digest::{RpxDigest, RpxDigestError};

#[cfg(test)]
mod tests;

pub type CubicExtElement = CubeExtension<Felt>;

// HASHER IMPLEMENTATION
@@ -26,8 +29,10 @@ pub type CubicExtElement = CubeExtension<Felt>;
/// * Capacity size: 4 field elements.
/// * S-Box degree: 7.
/// * Rounds: There are 3 different types of rounds:
///   - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` → `apply_inv_sbox`.
///   - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension field).
///   - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` →
///     `apply_inv_sbox`.
///   - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension
///     field).
///   - (M): `apply_mds` → `add_constants`.
/// * Permutation: (FB) (E) (FB) (E) (FB) (E) (M).
///
@@ -53,7 +58,7 @@ pub type CubicExtElement = CubeExtension<Felt>;
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Rpx256::hash_elements) function rather then hashing the serialized bytes
/// [hash_elements()](Rpx256::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Rpx256::hash) function.
///
/// ## Domain separation
@@ -66,6 +71,10 @@ pub type CubicExtElement = CubeExtension<Felt>;
/// the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPX permutation when hashing empty input.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpx256();

@@ -97,11 +106,18 @@ impl Hasher for Rpx256 {
        // into the state.
        //
        // every time the rate range is filled, a permutation is performed. if the final value of
        // `i` is not zero, then the chunks count wasn't enough to fill the state range, and an
        // additional permutation must be performed.
        let i = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |i, chunk| {
        // `rate_pos` is not zero, then the chunks count wasn't enough to fill the state range,
        // and an additional permutation must be performed.
        let mut current_chunk_idx = 0_usize;
        // handle the case of an empty `bytes`
        let last_chunk_idx = if num_field_elem == 0 {
            current_chunk_idx
        } else {
            num_field_elem - 1
        };
        let rate_pos = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |rate_pos, chunk| {
            // copy the chunk into the buffer
            if i != num_field_elem - 1 {
            if current_chunk_idx != last_chunk_idx {
                buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
            } else {
                // on the last iteration, we pad `buf` with a 1 followed by as many 0's as are
@@ -110,18 +126,19 @@ impl Hasher for Rpx256 {
                buf[..chunk.len()].copy_from_slice(chunk);
                buf[chunk.len()] = 1;
            }
            current_chunk_idx += 1;

            // set the current rate element to the input. since we take at most 7 bytes, we are
            // guaranteed that the inputs data will fit into a single field element.
            state[RATE_RANGE.start + i] = Felt::new(u64::from_le_bytes(buf));
            state[RATE_RANGE.start + rate_pos] = Felt::new(u64::from_le_bytes(buf));

            // proceed filling the range. if it's full, then we apply a permutation and reset the
            // counter to the beginning of the range.
            if i == RATE_WIDTH - 1 {
            if rate_pos == RATE_WIDTH - 1 {
                Self::apply_permutation(&mut state);
                0
            } else {
                i + 1
                rate_pos + 1
            }
        });

@@ -130,8 +147,8 @@ impl Hasher for Rpx256 {
        // don't need to apply any extra padding because the first capacity element contains a
        // flag indicating the number of field elements constituting the last block when the latter
        // is not divisible by `RATE_WIDTH`.
        if i != 0 {
            state[RATE_RANGE.start + i..RATE_RANGE.end].fill(ZERO);
        if rate_pos != 0 {
            state[RATE_RANGE.start + rate_pos..RATE_RANGE.end].fill(ZERO);
            Self::apply_permutation(&mut state);
        }

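The fold above walks the input in `BINARY_CHUNK_SIZE` (7-byte) chunks so that every packed value stays below the ~64-bit field modulus, and it marks the end of the input with a single `1` byte. A standalone sketch of just the packing step, under the assumption that the chunk size is 7; the helper name is ours:

```rust
/// Packs `bytes` into little-endian u64 values, 7 bytes per value, writing a
/// single 1-byte marker right after the final chunk's data. Empty input
/// produces no values, matching the "empty input, no permutation" behavior.
fn pack_bytes(bytes: &[u8]) -> Vec<u64> {
    const CHUNK: usize = 7;
    let num_chunks = bytes.len().div_ceil(CHUNK);
    let mut out = Vec::with_capacity(num_chunks);
    for (i, chunk) in bytes.chunks(CHUNK).enumerate() {
        let mut buf = [0u8; 8];
        buf[..chunk.len()].copy_from_slice(chunk);
        if i == num_chunks - 1 {
            // last chunk: the padding marker lands in the byte after the data
            buf[chunk.len()] = 1;
        }
        out.push(u64::from_le_bytes(buf));
    }
    out
}

fn main() {
    // the marker makes trailing zeros distinguishable, as the tests below check
    assert_ne!(pack_bytes(&[1, 2, 3]), pack_bytes(&[1, 2, 3, 0]));
}
```
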
@@ -143,7 +160,7 @@ impl Hasher for Rpx256 {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = Self::Digest::digests_as_elements(values.iter());
        let it = Self::Digest::digests_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }
@@ -153,13 +170,17 @@ impl Hasher for Rpx256 {
        RpxDigest::new(state[DIGEST_RANGE].try_into().unwrap())
    }

    fn merge_many(values: &[Self::Digest]) -> Self::Digest {
        Self::hash_elements(Self::Digest::digests_as_elements(values))
    }

    fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
        // initialize the state as follows:
        // - seed is copied into the first 4 elements of the rate portion of the state.
        // - if the value fits into a single field element, copy it into the fifth rate element and
        //   set the first capacity element to 5.
        // - if the value doesn't fit into a single field element, split it into two field
        //   elements, copy them into rate elements 5 and 6 and set the first capacity element to 6.
        // - if the value doesn't fit into a single field element, split it into two field elements,
        //   copy them into rate elements 5 and 6 and set the first capacity element to 6.
        let mut state = [ZERO; STATE_WIDTH];
        state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
        state[INPUT2_RANGE.start] = Felt::new(value);
@@ -170,7 +191,7 @@ impl Hasher for Rpx256 {
            state[CAPACITY_RANGE.start] = Felt::from(6_u8);
        }

        // apply the RPX permutation and return the first four elements of the state
        // apply the RPX permutation and return the first four elements of the rate
        Self::apply_permutation(&mut state);
        RpxDigest::new(state[DIGEST_RANGE].try_into().unwrap())
    }
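The comment in `merge_with_int` above describes how a value that does not fit into one field element is split. Because any `u64` is less than twice the Goldilocks prime `p = 2^64 - 2^32 + 1` (the modulus we assume `Felt` uses here), a single subtraction suffices, and the split is `(value mod p, 1)`; this is also what the `hash_elements_vs_merge_with_int` test later in this diff reconstructs. A hedged sketch, with our own helper name:

```rust
const P: u64 = 0xFFFF_FFFF_0000_0001; // Goldilocks prime, assumed for Felt

/// Returns the extra rate elements and the capacity flag merge_with_int
/// would use for `value`: one element and flag 5 if it fits, two elements
/// (value mod p, 1) and flag 6 otherwise.
fn split_value(value: u64) -> (Vec<u64>, u8) {
    if value < P {
        (vec![value], 5)
    } else {
        (vec![value - P, 1], 6)
    }
}

fn main() {
    assert_eq!(split_value(42), (vec![42], 5));
    assert_eq!(split_value(P + 2), (vec![2, 1], 6));
}
```
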
@@ -276,7 +297,7 @@ impl Rpx256 {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = RpxDigest::digests_as_elements(values.iter());
        let it = RpxDigest::digests_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }

src/hash/rescue/rpx/tests.rs (new file, 186 lines)
@@ -0,0 +1,186 @@
use alloc::{collections::BTreeSet, vec::Vec};

use proptest::prelude::*;
use rand_utils::rand_value;

use super::{Felt, Hasher, Rpx256, StarkField, ZERO};
use crate::{hash::rescue::RpxDigest, ONE};

#[test]
fn hash_elements_vs_merge() {
    let elements = [Felt::new(rand_value()); 8];

    let digests: [RpxDigest; 2] = [
        RpxDigest::new(elements[..4].try_into().unwrap()),
        RpxDigest::new(elements[4..].try_into().unwrap()),
    ];

    let m_result = Rpx256::merge(&digests);
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(m_result, h_result);
}

#[test]
fn merge_vs_merge_in_domain() {
    let elements = [Felt::new(rand_value()); 8];

    let digests: [RpxDigest; 2] = [
        RpxDigest::new(elements[..4].try_into().unwrap()),
        RpxDigest::new(elements[4..].try_into().unwrap()),
    ];
    let merge_result = Rpx256::merge(&digests);

    // ----- merge with domain = 0 ----------------------------------------------------------------

    // set domain to ZERO. This should not change the result.
    let domain = ZERO;

    let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
    assert_eq!(merge_result, merge_in_domain_result);

    // ----- merge with domain = 1 ----------------------------------------------------------------

    // set domain to ONE. This should change the result.
    let domain = ONE;

    let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
    assert_ne!(merge_result, merge_in_domain_result);
}

#[test]
fn hash_elements_vs_merge_with_int() {
    let tmp = [Felt::new(rand_value()); 4];
    let seed = RpxDigest::new(tmp);

    // ----- value fits into a field element ------------------------------------------------------
    let val: Felt = Felt::new(rand_value());
    let m_result = Rpx256::merge_with_int(seed, val.as_int());

    let mut elements = seed.as_elements().to_vec();
    elements.push(val);
    let h_result = Rpx256::hash_elements(&elements);

    assert_eq!(m_result, h_result);

    // ----- value does not fit into a field element ----------------------------------------------
    let val = Felt::MODULUS + 2;
    let m_result = Rpx256::merge_with_int(seed, val);

    let mut elements = seed.as_elements().to_vec();
    elements.push(Felt::new(val));
    elements.push(ONE);
    let h_result = Rpx256::hash_elements(&elements);

    assert_eq!(m_result, h_result);
}

#[test]
fn hash_padding() {
    // adding a zero byte at the end of a byte string should result in a different hash
    let r1 = Rpx256::hash(&[1_u8, 2, 3]);
    let r2 = Rpx256::hash(&[1_u8, 2, 3, 0]);
    assert_ne!(r1, r2);

    // same as above but with bigger inputs
    let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6]);
    let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 0]);
    assert_ne!(r1, r2);

    // same as above but with input splitting over two elements
    let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7]);
    let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0]);
    assert_ne!(r1, r2);

    // same as above but with multiple zeros
    let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0]);
    let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0]);
    assert_ne!(r1, r2);
}

#[test]
fn hash_elements_padding() {
    let e1 = [Felt::new(rand_value()); 2];
    let e2 = [e1[0], e1[1], ZERO];

    let r1 = Rpx256::hash_elements(&e1);
    let r2 = Rpx256::hash_elements(&e2);
    assert_ne!(r1, r2);
}

#[test]
fn hash_elements() {
    let elements = [
        ZERO,
        ONE,
        Felt::new(2),
        Felt::new(3),
        Felt::new(4),
        Felt::new(5),
        Felt::new(6),
        Felt::new(7),
    ];

    let digests: [RpxDigest; 2] = [
        RpxDigest::new(elements[..4].try_into().unwrap()),
        RpxDigest::new(elements[4..8].try_into().unwrap()),
    ];

    let m_result = Rpx256::merge(&digests);
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(m_result, h_result);
}

#[test]
fn hash_empty() {
    let elements: Vec<Felt> = vec![];

    let zero_digest = RpxDigest::default();
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(zero_digest, h_result);
}

#[test]
fn hash_empty_bytes() {
    let bytes: Vec<u8> = vec![];

    let zero_digest = RpxDigest::default();
    let h_result = Rpx256::hash(&bytes);
    assert_eq!(zero_digest, h_result);
}

#[test]
fn sponge_bytes_with_remainder_length_wont_panic() {
    // this test asserts that no panic occurs in the edge case of an input whose
    // length is not divisible by the binary chunk size in use. 113 is a
    // non-negligible input length that is prime, hence guaranteed not to be
    // divisible by any choice of chunk size.
    //
    // this is a preliminary check ahead of the proptest fuzz test below.
    Rpx256::hash(&[0; 113]);
}

#[test]
fn sponge_collision_for_wrapped_field_element() {
    let a = Rpx256::hash(&[0; 8]);
    let b = Rpx256::hash(&Felt::MODULUS.to_le_bytes());
    assert_ne!(a, b);
}

#[test]
fn sponge_zeroes_collision() {
    let mut zeroes = Vec::with_capacity(255);
    let mut set = BTreeSet::new();
    (0..255).for_each(|_| {
        let hash = Rpx256::hash(&zeroes);
        zeroes.push(0);
        // panic if a collision was found
        assert!(set.insert(hash));
    });
}

proptest! {
    #[test]
    fn rpo256_wont_panic_with_arbitrary_input(ref bytes in any::<Vec<u8>>()) {
        Rpx256::hash(bytes);
    }
}
src/main.rs (49 lines changed)
@@ -35,6 +35,7 @@ pub fn benchmark_smt() {

    let mut tree = construction(entries, tree_size).unwrap();
    insertion(&mut tree, tree_size).unwrap();
    batched_insertion(&mut tree, tree_size).unwrap();
    proof_generation(&mut tree, tree_size).unwrap();
}

@@ -82,6 +83,54 @@ pub fn insertion(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
    Ok(())
}

pub fn batched_insertion(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
    println!("Running a batched insertion benchmark:");

    let new_pairs: Vec<(RpoDigest, Word)> = (0..1000)
        .map(|i| {
            let key = Rpo256::hash(&rand_value::<u64>().to_be_bytes());
            let value = [ONE, ONE, ONE, Felt::new(size + i)];
            (key, value)
        })
        .collect();

    let now = Instant::now();
    let mutations = tree.compute_mutations(new_pairs);
    let compute_elapsed = now.elapsed();

    let now = Instant::now();
    tree.apply_mutations(mutations).unwrap();
    let apply_elapsed = now.elapsed();

    println!(
        "An average batch computation time measured by a 1k-batch into an SMT with {} key-value pairs over {:.3} milliseconds is {:.3} milliseconds",
        size,
        compute_elapsed.as_secs_f32() * 1000f32,
        // Dividing by the number of iterations, 1000, and then multiplying by 1000 to get
        // milliseconds, cancels out.
        compute_elapsed.as_secs_f32(),
    );

    println!(
        "An average batch application time measured by a 1k-batch into an SMT with {} key-value pairs over {:.3} milliseconds is {:.3} milliseconds",
        size,
        apply_elapsed.as_secs_f32() * 1000f32,
        // Dividing by the number of iterations, 1000, and then multiplying by 1000 to get
        // milliseconds, cancels out.
        apply_elapsed.as_secs_f32(),
    );

    println!(
        "An average batch insertion time measured by a 1k-batch into an SMT with {} key-value pairs totals to {:.3} milliseconds",
        size,
        (compute_elapsed + apply_elapsed).as_secs_f32() * 1000f32,
    );

    println!();

    Ok(())
}

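The "cancels out" comments in the benchmark above rest on a small unit identity worth spelling out: converting the batch total to milliseconds multiplies by 1000, and averaging over the 1000-item batch divides by 1000 again, so the raw seconds value is already the per-item average in milliseconds. A tiny illustrative check (values are made up):

```rust
fn main() {
    let elapsed_secs = 0.250_f32; // say the whole 1000-item batch took 250 ms
    let avg_ms_per_item = elapsed_secs * 1000.0 / 1000.0;
    assert_eq!(avg_ms_per_item, elapsed_secs); // the two factors of 1000 cancel
}
```
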
/// Runs the proof generation benchmark for the [`Smt`].
pub fn proof_generation(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
    println!("Running a proof generation benchmark:");

@@ -1,6 +1,6 @@
use core::slice;

use super::{Felt, RpoDigest, EMPTY_WORD};
use super::{smt::InnerNode, Felt, RpoDigest, EMPTY_WORD};

// EMPTY NODES SUBTREES
// ================================================================================================
@@ -25,6 +25,17 @@ impl EmptySubtreeRoots {
        let pos = 255 - tree_depth + node_depth;
        &EMPTY_SUBTREES[pos as usize]
    }

    /// Returns a sparse Merkle tree [`InnerNode`] with two empty children.
    ///
    /// # Note
    /// `node_depth` is the depth of the **parent** to have empty children. That is, `node_depth`
    /// and the depth of the returned [`InnerNode`] are the same, and thus the empty hashes are for
    /// subtrees of depth `node_depth + 1`.
    pub(crate) const fn get_inner_node(tree_depth: u8, node_depth: u8) -> InnerNode {
        let &child = Self::entry(tree_depth, node_depth + 1);
        InnerNode { left: child, right: child }
    }
}

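A usage sketch for the new constructor, following the depth convention spelled out in the note above. The concrete depths are our example, and since `get_inner_node` is `pub(crate)`, this would only compile inside the crate:

```rust
// For a depth-64 tree: the parent at depth 10 has two identical children,
// each the root of an empty subtree of depth 11.
let node = EmptySubtreeRoots::get_inner_node(64, 10);
assert_eq!(node.left, node.right);
assert_eq!(node.left, *EmptySubtreeRoots::entry(64, 11));
```
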
const EMPTY_SUBTREES: [RpoDigest; 256] = [

@@ -1,65 +1,34 @@
use alloc::vec::Vec;
use core::fmt;
use thiserror::Error;

use super::{smt::SmtLeafError, MerklePath, NodeIndex, RpoDigest};
use super::{NodeIndex, RpoDigest};

#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Debug, Error)]
pub enum MerkleError {
    ConflictingRoots(Vec<RpoDigest>),
    #[error("expected merkle root {expected_root} found {actual_root}")]
    ConflictingRoots {
        expected_root: RpoDigest,
        actual_root: RpoDigest,
    },
    #[error("provided merkle tree depth {0} is too small")]
    DepthTooSmall(u8),
    #[error("provided merkle tree depth {0} is too big")]
    DepthTooBig(u64),
    #[error("multiple values provided for merkle tree index {0}")]
    DuplicateValuesForIndex(u64),
    DuplicateValuesForKey(RpoDigest),
    InvalidIndex { depth: u8, value: u64 },
    InvalidDepth { expected: u8, provided: u8 },
    InvalidSubtreeDepth { subtree_depth: u8, tree_depth: u8 },
    InvalidPath(MerklePath),
    InvalidNumEntries(usize),
    NodeNotInSet(NodeIndex),
    NodeNotInStore(RpoDigest, NodeIndex),
    #[error("node index value {value} is not valid for depth {depth}")]
    InvalidNodeIndex { depth: u8, value: u64 },
    #[error("provided node index depth {provided} does not match expected depth {expected}")]
    InvalidNodeIndexDepth { expected: u8, provided: u8 },
    #[error("merkle subtree depth {subtree_depth} exceeds merkle tree depth {tree_depth}")]
    SubtreeDepthExceedsDepth { subtree_depth: u8, tree_depth: u8 },
    #[error("number of entries in the merkle tree exceeds the maximum of {0}")]
    TooManyEntries(usize),
    #[error("node index `{0}` not found in the tree")]
    NodeIndexNotFoundInTree(NodeIndex),
    #[error("node {0:?} with index `{1}` not found in the store")]
    NodeIndexNotFoundInStore(RpoDigest, NodeIndex),
    #[error("number of provided merkle tree leaves {0} is not a power of two")]
    NumLeavesNotPowerOfTwo(usize),
    #[error("root {0:?} is not in the store")]
    RootNotInStore(RpoDigest),
    SmtLeaf(SmtLeafError),
}

impl fmt::Display for MerkleError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use MerkleError::*;
        match self {
            ConflictingRoots(roots) => write!(f, "the merkle paths roots do not match {roots:?}"),
            DepthTooSmall(depth) => write!(f, "the provided depth {depth} is too small"),
            DepthTooBig(depth) => write!(f, "the provided depth {depth} is too big"),
            DuplicateValuesForIndex(key) => write!(f, "multiple values provided for key {key}"),
            DuplicateValuesForKey(key) => write!(f, "multiple values provided for key {key}"),
            InvalidIndex { depth, value } => {
                write!(f, "the index value {value} is not valid for the depth {depth}")
            }
            InvalidDepth { expected, provided } => {
                write!(f, "the provided depth {provided} is not valid for {expected}")
            }
            InvalidSubtreeDepth { subtree_depth, tree_depth } => {
                write!(f, "tried inserting a subtree of depth {subtree_depth} into a tree of depth {tree_depth}")
            }
            InvalidPath(_path) => write!(f, "the provided path is not valid"),
            InvalidNumEntries(max) => write!(f, "number of entries exceeded the maximum: {max}"),
            NodeNotInSet(index) => write!(f, "the node with index ({index}) is not in the set"),
            NodeNotInStore(hash, index) => {
                write!(f, "the node {hash:?} with index ({index}) is not in the store")
            }
            NumLeavesNotPowerOfTwo(leaves) => {
                write!(f, "the leaves count {leaves} is not a power of 2")
            }
            RootNotInStore(root) => write!(f, "the root {:?} is not in the store", root),
            SmtLeaf(smt_leaf_error) => write!(f, "smt leaf error: {smt_leaf_error}"),
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for MerkleError {}

impl From<SmtLeafError> for MerkleError {
    fn from(value: SmtLeafError) -> Self {
        Self::SmtLeaf(value)
    }
}

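The hunk above is the core of the `thiserror` migration: the `#[error(...)]` attributes replace the hand-written `Display` impl and the `#[cfg(feature = "std")] impl std::error::Error` block that the commit deletes. A minimal sketch of the pattern; the demo enum is ours, not from the diff:

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
    #[error("provided merkle tree depth {0} is too small")]
    DepthTooSmall(u8),
}

fn main() {
    // Display (and the Error trait impl) are derived from the attribute.
    let msg = DemoError::DepthTooSmall(1).to_string();
    assert_eq!(msg, "provided merkle tree depth 1 is too small");
}
```
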
@@ -38,7 +38,7 @@ impl NodeIndex {
    /// Returns an error if the `value` is greater than or equal to 2^{depth}.
    pub const fn new(depth: u8, value: u64) -> Result<Self, MerkleError> {
        if (64 - value.leading_zeros()) > depth as u32 {
            Err(MerkleError::InvalidIndex { depth, value })
            Err(MerkleError::InvalidNodeIndex { depth, value })
        } else {
            Ok(Self { depth, value })
        }
@@ -97,6 +97,14 @@ impl NodeIndex {
        self
    }

    /// Returns the parent of the current node. This is the same as [`Self::move_up()`], but returns
    /// a new value instead of mutating `self`.
    pub const fn parent(mut self) -> Self {
        self.depth = self.depth.saturating_sub(1);
        self.value >>= 1;
        self
    }

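A quick sketch of the new `parent()` helper's arithmetic, assuming `NodeIndex` is `Copy` (its by-value use above suggests it is): halving the value moves to the parent slot, and `saturating_sub` keeps the root stable:

```rust
let idx = NodeIndex::new(3, 5).unwrap();
assert_eq!(idx.parent(), NodeIndex::new(2, 2).unwrap()); // 5 >> 1 == 2

let root = NodeIndex::new(0, 0).unwrap();
assert_eq!(root.parent(), root); // depth saturates at 0
```
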
    // PROVIDERS
    // --------------------------------------------------------------------------------------------

@@ -182,6 +190,7 @@ impl Deserializable for NodeIndex {

#[cfg(test)]
mod tests {
    use assert_matches::assert_matches;
    use proptest::prelude::*;

    use super::*;
@@ -190,19 +199,19 @@ mod tests {
    fn test_node_index_value_too_high() {
        assert_eq!(NodeIndex::new(0, 0).unwrap(), NodeIndex { depth: 0, value: 0 });
        let err = NodeIndex::new(0, 1).unwrap_err();
        assert_eq!(err, MerkleError::InvalidIndex { depth: 0, value: 1 });
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 0, value: 1 });

        assert_eq!(NodeIndex::new(1, 1).unwrap(), NodeIndex { depth: 1, value: 1 });
        let err = NodeIndex::new(1, 2).unwrap_err();
        assert_eq!(err, MerkleError::InvalidIndex { depth: 1, value: 2 });
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 1, value: 2 });

        assert_eq!(NodeIndex::new(2, 3).unwrap(), NodeIndex { depth: 2, value: 3 });
        let err = NodeIndex::new(2, 4).unwrap_err();
        assert_eq!(err, MerkleError::InvalidIndex { depth: 2, value: 4 });
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 2, value: 4 });

        assert_eq!(NodeIndex::new(3, 7).unwrap(), NodeIndex { depth: 3, value: 7 });
        let err = NodeIndex::new(3, 8).unwrap_err();
        assert_eq!(err, MerkleError::InvalidIndex { depth: 3, value: 8 });
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 3, value: 8 });
    }

    #[test]

@@ -211,7 +211,7 @@ pub struct InnerNodeIterator<'a> {
    index: usize,
}

impl<'a> Iterator for InnerNodeIterator<'a> {
impl Iterator for InnerNodeIterator<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {

@@ -1,6 +1,7 @@
use super::super::RpoDigest;
use alloc::vec::Vec;

use super::super::RpoDigest;

/// Container for the update data of a [super::PartialMmr]
#[derive(Debug)]
pub struct MmrDelta {

@@ -1,35 +1,27 @@
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
use alloc::string::String;

use thiserror::Error;

use crate::merkle::MerkleError;

#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, Error)]
pub enum MmrError {
    InvalidPosition(usize),
    InvalidPeaks,
    InvalidPeak,
    #[error("mmr does not contain position {0}")]
    PositionNotFound(usize),
    #[error("mmr peaks are invalid: {0}")]
    InvalidPeaks(String),
    #[error(
        "mmr peak does not match the computed merkle root of the provided authentication path"
    )]
    PeakPathMismatch,
    #[error("requested peak index is {peak_idx} but the number of peaks is {peaks_len}")]
    PeakOutOfBounds { peak_idx: usize, peaks_len: usize },
    #[error("invalid mmr update")]
    InvalidUpdate,
    UnknownPeak,
    MerkleError(MerkleError),
    #[error("mmr does not contain a peak with depth {0}")]
    UnknownPeak(u8),
    #[error("invalid merkle path")]
    InvalidMerklePath(#[source] MerkleError),
    #[error("merkle root computation failed")]
    MerkleRootComputationFailed(#[source] MerkleError),
}

impl Display for MmrError {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
        match self {
            MmrError::InvalidPosition(pos) => write!(fmt, "Mmr does not contain position {pos}"),
            MmrError::InvalidPeaks => write!(fmt, "Invalid peaks count"),
            MmrError::InvalidPeak => {
                write!(fmt, "Peak values does not match merkle path computed root")
            }
            MmrError::InvalidUpdate => write!(fmt, "Invalid mmr update"),
            MmrError::UnknownPeak => {
                write!(fmt, "Peak not in Mmr")
            }
            MmrError::MerkleError(err) => write!(fmt, "{}", err),
        }
    }
}

#[cfg(feature = "std")]
impl Error for MmrError {}

@@ -10,13 +10,14 @@
//! depths, i.e. as part of adding a new element to the forest the trees with same depth are
//! merged, creating a new tree with depth d+1, this process is continued until the property is
//! reestablished.
use alloc::vec::Vec;

use super::{
    super::{InnerNodeInfo, MerklePath},
    bit::TrueBitPositionIterator,
    leaf_to_corresponding_tree, nodes_in_forest, MmrDelta, MmrError, MmrPeaks, MmrProof, Rpo256,
    RpoDigest,
};
use alloc::vec::Vec;

// MMR
// ===============================================================================================
@@ -72,19 +73,36 @@ impl Mmr {
    // FUNCTIONALITY
    // ============================================================================================

    /// Given a leaf position, returns the Merkle path to its corresponding peak. If the position
    /// is greater-or-equal than the tree size an error is returned.
    /// Returns an [MmrProof] for the leaf at the specified position.
    ///
    /// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
    /// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
    /// has position 0, the second position 1, and so on.
    pub fn open(&self, pos: usize, target_forest: usize) -> Result<MmrProof, MmrError> {
    ///
    /// # Errors
    /// Returns an error if the specified leaf position is out of bounds for this MMR.
    pub fn open(&self, pos: usize) -> Result<MmrProof, MmrError> {
        self.open_at(pos, self.forest)
    }

    /// Returns an [MmrProof] for the leaf at the specified position using the state of the MMR
    /// at the specified `forest`.
    ///
    /// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
    /// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
    /// has position 0, the second position 1, and so on.
    ///
    /// # Errors
    /// Returns an error if:
    /// - The specified leaf position is out of bounds for this MMR.
    /// - The specified `forest` value is not valid for this MMR.
    pub fn open_at(&self, pos: usize, forest: usize) -> Result<MmrProof, MmrError> {
        // find the target tree responsible for the MMR position
        let tree_bit =
            leaf_to_corresponding_tree(pos, target_forest).ok_or(MmrError::InvalidPosition(pos))?;
            leaf_to_corresponding_tree(pos, forest).ok_or(MmrError::PositionNotFound(pos))?;

        // isolate the trees before the target
        let forest_before = target_forest & high_bitmask(tree_bit + 1);
        let forest_before = forest & high_bitmask(tree_bit + 1);
        let index_offset = nodes_in_forest(forest_before);

        // update the value position from global to the target tree
@@ -94,7 +112,7 @@ impl Mmr {
        let (_, path) = self.collect_merkle_path_and_value(tree_bit, relative_pos, index_offset);

        Ok(MmrProof {
            forest: target_forest,
            forest,
            position: pos,
            merkle_path: MerklePath::new(path),
        })
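A sketch of how the reworked pair of methods is meant to be used. `int_to_node` is the test helper used elsewhere in this diff, and per the docs above, `forest` counts the leaves added so far:

```rust
let mut mmr = Mmr::default();
(0..3).for_each(|i| mmr.add(int_to_node(i)));

let proof_now = mmr.open(1).unwrap();                  // against the current state
let proof_at = mmr.open_at(1, mmr.forest()).unwrap();  // explicit forest value
assert_eq!(proof_now, proof_at); // identical when forest == current forest
```
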
@@ -108,7 +126,7 @@ impl Mmr {
    pub fn get(&self, pos: usize) -> Result<RpoDigest, MmrError> {
        // find the target tree responsible for the MMR position
        let tree_bit =
            leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::InvalidPosition(pos))?;
            leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::PositionNotFound(pos))?;

        // isolate the trees before the target
        let forest_before = self.forest & high_bitmask(tree_bit + 1);
@@ -145,10 +163,21 @@ impl Mmr {
        self.forest += 1;
    }

    /// Returns an peaks of the MMR for the version specified by `forest`.
    pub fn peaks(&self, forest: usize) -> Result<MmrPeaks, MmrError> {
    /// Returns the current peaks of the MMR.
    pub fn peaks(&self) -> MmrPeaks {
        self.peaks_at(self.forest).expect("failed to get peaks at current forest")
    }

    /// Returns the peaks of the MMR at the state specified by `forest`.
    ///
    /// # Errors
    /// Returns an error if the specified `forest` value is not valid for this MMR.
    pub fn peaks_at(&self, forest: usize) -> Result<MmrPeaks, MmrError> {
        if forest > self.forest {
            return Err(MmrError::InvalidPeaks);
            return Err(MmrError::InvalidPeaks(format!(
                "requested forest {forest} exceeds current forest {}",
                self.forest
            )));
        }

        let peaks: Vec<RpoDigest> = TrueBitPositionIterator::new(forest)
@@ -173,7 +202,7 @@ impl Mmr {
    /// that have been merged together, followed by the new peaks of the [Mmr].
    pub fn get_delta(&self, from_forest: usize, to_forest: usize) -> Result<MmrDelta, MmrError> {
        if to_forest > self.forest || from_forest > to_forest {
            return Err(MmrError::InvalidPeaks);
            return Err(MmrError::InvalidPeaks(format!("to_forest {to_forest} exceeds the current forest {} or from_forest {from_forest} exceeds to_forest", self.forest)));
        }

        if from_forest == to_forest {
@@ -344,7 +373,7 @@ pub struct MmrNodes<'a> {
    index: usize,
}

impl<'a> Iterator for MmrNodes<'a> {
impl Iterator for MmrNodes<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {
@@ -377,7 +406,8 @@ impl<'a> Iterator for MmrNodes<'a> {
            // the next parent position is one above the position of the pair
            let parent = self.last_right << 1;

            // the left node has been paired and the current parent yielded, removed it from the forest
            // the left node has been paired and the current parent yielded, removed it from the
            // forest
            self.forest ^= self.last_right;
            if self.forest & parent == 0 {
                // this iteration yielded the left parent node

@@ -6,6 +6,8 @@
//! leaves count.
use core::num::NonZeroUsize;

use winter_utils::{Deserializable, Serializable};

// IN-ORDER INDEX
// ================================================================================================

@@ -112,6 +114,21 @@ impl InOrderIndex {
    }
}

impl Serializable for InOrderIndex {
    fn write_into<W: winter_utils::ByteWriter>(&self, target: &mut W) {
        target.write_usize(self.idx);
    }
}

impl Deserializable for InOrderIndex {
    fn read_from<R: winter_utils::ByteReader>(
        source: &mut R,
    ) -> Result<Self, winter_utils::DeserializationError> {
        let idx = source.read_usize()?;
        Ok(InOrderIndex { idx })
    }
}

// CONVERSIONS FROM IN-ORDER INDEX
// ------------------------------------------------------------------------------------------------

@@ -127,6 +144,7 @@ impl From<InOrderIndex> for u64 {
#[cfg(test)]
mod test {
    use proptest::prelude::*;
    use winter_utils::{Deserializable, Serializable};

    use super::InOrderIndex;

@@ -162,4 +180,12 @@ mod test {
        assert_eq!(left.sibling(), right);
        assert_eq!(left, right.sibling());
    }

    #[test]
    fn test_inorder_index_serialization() {
        let index = InOrderIndex::from_leaf_pos(5);
        let bytes = index.to_bytes();
        let index2 = InOrderIndex::read_from_bytes(&bytes).unwrap();
        assert_eq!(index, index2);
    }
}

@@ -10,8 +10,6 @@ mod proof;
#[cfg(test)]
mod tests;

use super::{Felt, Rpo256, RpoDigest, Word};

// REEXPORTS
// ================================================================================================
pub use delta::MmrDelta;
@@ -22,6 +20,8 @@ pub use partial::PartialMmr;
pub use peaks::MmrPeaks;
pub use proof::MmrProof;

use super::{Felt, Rpo256, RpoDigest, Word};

// UTILITIES
// ===============================================================================================

@@ -42,8 +42,8 @@ const fn leaf_to_corresponding_tree(pos: usize, forest: usize) -> Option<u32> {
    //   - this means the first tree owns from `0` up to the `2^k_0` first positions, where `k_0`
    //     is the highest true bit position, the second tree from `2^k_0 + 1` up to `2^k_1` where
    //     `k_1` is the second highest bit, so on.
    //   - this means the highest bits work as a category marker, and the position is owned by
    //     the first tree which doesn't share a high bit with the position
    //   - this means the highest bits work as a category marker, and the position is owned by the
    //     first tree which doesn't share a high bit with the position
    let before = forest & pos;
    let after = forest ^ before;
    let tree = after.ilog2();

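A worked instance of the bit trick described in the comments above, with `forest = 0b1010` (an 8-leaf tree followed by a 2-leaf tree):

```rust
let forest: usize = 0b1010; // trees of 8 and 2 leaves; 10 leaves total
let pos: usize = 9;         // 0b1001, the second leaf of the 2-leaf tree

let before = forest & pos;   // 0b1000: high bit shared with the earlier tree
let after = forest ^ before; // 0b0010: first tree not sharing a high bit
assert_eq!(after.ilog2(), 1); // position 9 belongs to the 2-leaf tree
```

Positions 0 through 7 share no high bit removal and resolve to bit 3 (the 8-leaf tree), while positions 8 and 9 strip that bit and resolve to bit 1, exactly the ownership the comment describes.
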
@@ -1,12 +1,15 @@
|
||||
use alloc::{
|
||||
collections::{BTreeMap, BTreeSet},
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
use winter_utils::{Deserializable, Serializable};
|
||||
|
||||
use super::{MmrDelta, MmrProof, Rpo256, RpoDigest};
|
||||
use crate::merkle::{
|
||||
mmr::{leaf_to_corresponding_tree, nodes_in_forest},
|
||||
InOrderIndex, InnerNodeInfo, MerklePath, MmrError, MmrPeaks,
|
||||
};
|
||||
use alloc::{
|
||||
collections::{BTreeMap, BTreeSet},
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
// TYPE ALIASES
|
||||
// ================================================================================================
|
||||
@@ -142,7 +145,7 @@ impl PartialMmr {
|
||||
/// in the underlying MMR.
|
||||
pub fn open(&self, pos: usize) -> Result<Option<MmrProof>, MmrError> {
|
||||
let tree_bit =
|
||||
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::InvalidPosition(pos))?;
|
||||
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::PositionNotFound(pos))?;
|
||||
let depth = tree_bit as usize;
|
||||
|
||||
let mut nodes = Vec::with_capacity(depth);
|
||||
@@ -183,7 +186,7 @@ impl PartialMmr {
|
||||
pub fn inner_nodes<'a, I: Iterator<Item = (usize, RpoDigest)> + 'a>(
|
||||
&'a self,
|
||||
mut leaves: I,
|
||||
) -> impl Iterator<Item = InnerNodeInfo> + '_ {
|
||||
) -> impl Iterator<Item = InnerNodeInfo> + 'a {
|
||||
let stack = if let Some((pos, leaf)) = leaves.next() {
|
||||
let idx = InOrderIndex::from_leaf_pos(pos);
|
||||
vec![(idx, leaf)]
|
||||
@@ -295,7 +298,7 @@ impl PartialMmr {
|
||||
// invalid.
|
||||
let tree = 1 << path.depth();
|
||||
if tree & self.forest == 0 {
|
||||
return Err(MmrError::UnknownPeak);
|
||||
return Err(MmrError::UnknownPeak(path.depth()));
|
||||
};
|
||||
|
||||
if leaf_pos + 1 == self.forest
|
||||
@@ -316,9 +319,11 @@ impl PartialMmr {
|
||||
|
||||
// Compute the root of the authentication path, and check it matches the current version of
|
||||
// the PartialMmr.
|
||||
let computed = path.compute_root(path_idx as u64, leaf).map_err(MmrError::MerkleError)?;
|
||||
let computed = path
|
||||
.compute_root(path_idx as u64, leaf)
|
||||
.map_err(MmrError::MerkleRootComputationFailed)?;
|
||||
if self.peaks[peak_pos] != computed {
|
||||
return Err(MmrError::InvalidPeak);
|
||||
return Err(MmrError::PeakPathMismatch);
|
||||
}
|
||||
|
||||
let mut idx = InOrderIndex::from_leaf_pos(leaf_pos);
|
||||
@@ -353,7 +358,10 @@ impl PartialMmr {
|
||||
/// inserted into the partial MMR.
|
||||
pub fn apply(&mut self, delta: MmrDelta) -> Result<Vec<(InOrderIndex, RpoDigest)>, MmrError> {
|
||||
if delta.forest < self.forest {
|
||||
return Err(MmrError::InvalidPeaks);
|
||||
return Err(MmrError::InvalidPeaks(format!(
|
||||
"forest of mmr delta {} is less than current forest {}",
|
||||
delta.forest, self.forest
|
||||
)));
|
||||
}
|
||||
|
||||
let mut inserted_nodes = Vec::new();
|
||||
@@ -536,7 +544,7 @@ pub struct InnerNodeIterator<'a, I: Iterator<Item = (usize, RpoDigest)>> {
|
||||
seen_nodes: BTreeSet<InOrderIndex>,
|
||||
}
|
||||
|
||||
impl<'a, I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<'a, I> {
|
||||
impl<I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<'_, I> {
|
||||
type Item = InnerNodeInfo;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
@@ -572,6 +580,28 @@ impl<'a, I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<
|
||||
}
|
||||
}
|
||||
|
||||
impl Serializable for PartialMmr {
|
||||
fn write_into<W: winter_utils::ByteWriter>(&self, target: &mut W) {
|
||||
self.forest.write_into(target);
|
||||
self.peaks.write_into(target);
|
||||
self.nodes.write_into(target);
|
||||
target.write_bool(self.track_latest);
|
||||
}
|
||||
}
|
||||
|
||||
impl Deserializable for PartialMmr {
|
||||
fn read_from<R: winter_utils::ByteReader>(
|
||||
source: &mut R,
|
||||
) -> Result<Self, winter_utils::DeserializationError> {
|
||||
let forest = usize::read_from(source)?;
|
||||
let peaks = Vec::<RpoDigest>::read_from(source)?;
|
||||
let nodes = NodeMap::read_from(source)?;
|
||||
let track_latest = source.read_bool()?;
|
||||
|
||||
Ok(Self { forest, peaks, nodes, track_latest })
|
||||
}
|
||||
}
|
||||
|
||||
// UTILS
|
||||
// ================================================================================================
|
||||
|
||||
@@ -613,12 +643,15 @@ fn forest_to_rightmost_index(forest: usize) -> InOrderIndex {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use alloc::{collections::BTreeSet, vec::Vec};
|
||||
|
||||
use winter_utils::{Deserializable, Serializable};
|
||||
|
||||
use super::{
|
||||
forest_to_rightmost_index, forest_to_root_index, InOrderIndex, MmrPeaks, PartialMmr,
|
||||
RpoDigest,
|
||||
};
|
||||
use crate::merkle::{int_to_node, MerkleStore, Mmr, NodeIndex};
|
||||
use alloc::{collections::BTreeSet, vec::Vec};
|
||||
|
||||
const LEAVES: [RpoDigest; 7] = [
|
||||
int_to_node(0),
|
||||
@@ -688,18 +721,18 @@ mod tests {
|
||||
// build an MMR with 10 nodes (2 peaks) and a partial MMR based on it
|
||||
let mut mmr = Mmr::default();
|
||||
(0..10).for_each(|i| mmr.add(int_to_node(i)));
|
||||
let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
|
||||
let mut partial_mmr: PartialMmr = mmr.peaks().into();
|
||||
|
||||
// add authentication path for position 1 and 8
|
||||
{
|
||||
let node = mmr.get(1).unwrap();
|
||||
let proof = mmr.open(1, mmr.forest()).unwrap();
|
||||
let proof = mmr.open(1).unwrap();
|
||||
partial_mmr.track(1, node, &proof.merkle_path).unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let node = mmr.get(8).unwrap();
|
||||
let proof = mmr.open(8, mmr.forest()).unwrap();
|
||||
let proof = mmr.open(8).unwrap();
|
||||
partial_mmr.track(8, node, &proof.merkle_path).unwrap();
|
||||
}
|
||||
|
||||
@@ -712,7 +745,7 @@ mod tests {
|
||||
validate_apply_delta(&mmr, &mut partial_mmr);
|
||||
{
|
||||
let node = mmr.get(12).unwrap();
|
||||
let proof = mmr.open(12, mmr.forest()).unwrap();
|
||||
let proof = mmr.open(12).unwrap();
|
||||
partial_mmr.track(12, node, &proof.merkle_path).unwrap();
|
||||
assert!(partial_mmr.track_latest);
|
||||
}
|
||||
@@ -737,7 +770,7 @@ mod tests {
|
||||
let nodes_delta = partial.apply(delta).unwrap();
|
||||
|
||||
// new peaks were computed correctly
|
||||
assert_eq!(mmr.peaks(mmr.forest()).unwrap(), partial.peaks());
|
||||
assert_eq!(mmr.peaks(), partial.peaks());
|
||||
|
||||
let mut expected_nodes = nodes_before;
|
||||
for (key, value) in nodes_delta {
|
||||
@@ -753,7 +786,7 @@ mod tests {
             let index_value: u64 = index.into();
             let pos = index_value / 2;
             let proof1 = partial.open(pos as usize).unwrap().unwrap();
-            let proof2 = mmr.open(pos as usize, mmr.forest()).unwrap();
+            let proof2 = mmr.open(pos as usize).unwrap();
             assert_eq!(proof1, proof2);
         }
     }
@@ -762,16 +795,16 @@ mod tests {
     fn test_partial_mmr_inner_nodes_iterator() {
         // build the MMR
         let mmr: Mmr = LEAVES.into();
-        let first_peak = mmr.peaks(mmr.forest).unwrap().peaks()[0];
+        let first_peak = mmr.peaks().peaks()[0];

         // -- test single tree ----------------------------

         // get path and node for position 1
         let node1 = mmr.get(1).unwrap();
-        let proof1 = mmr.open(1, mmr.forest()).unwrap();
+        let proof1 = mmr.open(1).unwrap();

         // create partial MMR and add authentication path to node at position 1
-        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
+        let mut partial_mmr: PartialMmr = mmr.peaks().into();
         partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();

         // empty iterator should have no nodes
@@ -789,13 +822,13 @@ mod tests {
         // -- test no duplicates --------------------------

         // build the partial MMR
-        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
+        let mut partial_mmr: PartialMmr = mmr.peaks().into();

         let node0 = mmr.get(0).unwrap();
-        let proof0 = mmr.open(0, mmr.forest()).unwrap();
+        let proof0 = mmr.open(0).unwrap();

         let node2 = mmr.get(2).unwrap();
-        let proof2 = mmr.open(2, mmr.forest()).unwrap();
+        let proof2 = mmr.open(2).unwrap();

         partial_mmr.track(0, node0, &proof0.merkle_path).unwrap();
         partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();
@@ -826,10 +859,10 @@ mod tests {
         // -- test multiple trees -------------------------

         // build the partial MMR
-        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
+        let mut partial_mmr: PartialMmr = mmr.peaks().into();

         let node5 = mmr.get(5).unwrap();
-        let proof5 = mmr.open(5, mmr.forest()).unwrap();
+        let proof5 = mmr.open(5).unwrap();

         partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();
         partial_mmr.track(5, node5, &proof5.merkle_path).unwrap();
@@ -841,7 +874,7 @@ mod tests {
         let index1 = NodeIndex::new(2, 1).unwrap();
         let index5 = NodeIndex::new(1, 1).unwrap();

-        let second_peak = mmr.peaks(mmr.forest).unwrap().peaks()[1];
+        let second_peak = mmr.peaks().peaks()[1];

         let path1 = store.get_path(first_peak, index1).unwrap().path;
         let path5 = store.get_path(second_peak, index5).unwrap().path;
@@ -860,8 +893,7 @@ mod tests {
             mmr.add(el);
             partial_mmr.add(el, false);

-            let mmr_peaks = mmr.peaks(mmr.forest()).unwrap();
-            assert_eq!(mmr_peaks, partial_mmr.peaks());
+            assert_eq!(mmr.peaks(), partial_mmr.peaks());
             assert_eq!(mmr.forest(), partial_mmr.forest());
         }
     }
@@ -877,12 +909,11 @@ mod tests {
             mmr.add(el);
             partial_mmr.add(el, true);

-            let mmr_peaks = mmr.peaks(mmr.forest()).unwrap();
-            assert_eq!(mmr_peaks, partial_mmr.peaks());
+            assert_eq!(mmr.peaks(), partial_mmr.peaks());
             assert_eq!(mmr.forest(), partial_mmr.forest());

             for pos in 0..i {
-                let mmr_proof = mmr.open(pos as usize, mmr.forest()).unwrap();
+                let mmr_proof = mmr.open(pos as usize).unwrap();
                 let partialmmr_proof = partial_mmr.open(pos as usize).unwrap().unwrap();
                 assert_eq!(mmr_proof, partialmmr_proof);
             }
@@ -894,8 +925,8 @@ mod tests {
         let mut mmr = Mmr::from((0..7).map(int_to_node));

         // derive a partial Mmr from it which tracks authentication path to leaf 5
-        let mut partial_mmr = PartialMmr::from_peaks(mmr.peaks(mmr.forest()).unwrap());
-        let path_to_5 = mmr.open(5, mmr.forest()).unwrap().merkle_path;
+        let mut partial_mmr = PartialMmr::from_peaks(mmr.peaks());
+        let path_to_5 = mmr.open(5).unwrap().merkle_path;
         let leaf_at_5 = mmr.get(5).unwrap();
         partial_mmr.track(5, leaf_at_5, &path_to_5).unwrap();

@@ -905,6 +936,17 @@ mod tests {
         partial_mmr.add(leaf_at_7, false);

         // the openings should be the same
-        assert_eq!(mmr.open(5, mmr.forest()).unwrap(), partial_mmr.open(5).unwrap().unwrap());
+        assert_eq!(mmr.open(5).unwrap(), partial_mmr.open(5).unwrap().unwrap());
     }

+    #[test]
+    fn test_partial_mmr_serialization() {
+        let mmr = Mmr::from((0..7).map(int_to_node));
+        let partial_mmr = PartialMmr::from_peaks(mmr.peaks());
+
+        let bytes = partial_mmr.to_bytes();
+        let decoded = PartialMmr::read_from_bytes(&bytes).unwrap();
+
+        assert_eq!(partial_mmr, decoded);
+    }
 }

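Note: the recurring change across these test hunks is an API split on `Mmr`: `open` and `peaks` now always answer for the current forest and take no forest argument, while historical queries move to dedicated `open_at`/`peaks_at` methods (seen in later hunks). A minimal, hedged sketch of the new calling convention; `int_to_node` is the crate's test helper mapping an integer to a leaf digest, and the exact signatures are as they appear in this diff:

    use miden_crypto::merkle::Mmr;

    fn sketch() {
        let mut mmr = Mmr::new();
        for v in 1..=8u64 {
            // int_to_node: test helper, assumed in scope as in these tests
            mmr.add(int_to_node(v));
        }

        // current-state queries take no forest argument anymore
        let proof = mmr.open(3).unwrap();
        mmr.peaks().verify(int_to_node(4), proof).unwrap();

        // historical queries against an older forest use the *_at variant
        let old_proof = mmr.open_at(3, 4).unwrap();
        assert_eq!(old_proof.forest, 4);
    }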
@@ -1,6 +1,7 @@
-use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};
 use alloc::vec::Vec;

+use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};
+
 // MMR PEAKS
 // ================================================================================================

@@ -18,12 +19,12 @@ pub struct MmrPeaks {
     ///
     /// Examples:
     ///
-    /// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number
-    ///   of peaks, in this case there are 2 peaks. The 0-indexed least-significant position of
-    ///   the bit determines the number of elements of a tree, so the rightmost tree has `2**0`
-    ///   elements and the left most has `2**2`.
-    /// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the
-    ///   leftmost tree has `2**3=8` elements, and the right most has `2**2=4` elements.
+    /// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number of peaks,
+    ///   in this case there are 2 peaks. The 0-indexed least-significant position of the bit
+    ///   determines the number of elements of a tree, so the rightmost tree has `2**0` elements
+    ///   and the left most has `2**2`.
+    /// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the leftmost tree has
+    ///   `2**3=8` elements, and the right most has `2**2=4` elements.
     num_leaves: usize,

     /// All the peaks of every tree in the MMR forest. The peaks are always ordered by number of
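Note: the reflowed doc comment above encodes the core forest invariant worth spelling out: the peak count equals the popcount of `num_leaves`, and each set bit at position p contributes a perfect tree with 2^p leaves. A hedged restatement of that arithmetic as a standalone test; the helper name is illustrative, not crate API:

    #[test]
    fn forest_bit_arithmetic() {
        // each set bit at position p is a perfect tree with 2^p leaves,
        // listed from the largest (leftmost) tree to the smallest
        fn peak_tree_sizes(num_leaves: usize) -> Vec<usize> {
            (0..usize::BITS)
                .rev()
                .filter(|&p| (num_leaves >> p) & 1 == 1)
                .map(|p| 1usize << p)
                .collect()
        }

        assert_eq!(peak_tree_sizes(0b101), vec![4, 1]); // 5 leaves -> 2 peaks
        assert_eq!(peak_tree_sizes(0b1100), vec![8, 4]); // 12 leaves -> 2 peaks
        assert_eq!(peak_tree_sizes(5).len(), 5usize.count_ones() as usize);
    }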
@@ -44,7 +45,11 @@ impl MmrPeaks {
     /// Returns an error if the number of leaves and the number of peaks are inconsistent.
     pub fn new(num_leaves: usize, peaks: Vec<RpoDigest>) -> Result<Self, MmrError> {
         if num_leaves.count_ones() as usize != peaks.len() {
-            return Err(MmrError::InvalidPeaks);
+            return Err(MmrError::InvalidPeaks(format!(
+                "number of one bits in leaves is {} which does not equal peak length {}",
+                num_leaves.count_ones(),
+                peaks.len()
+            )));
         }

         Ok(Self { num_leaves, peaks })
@@ -68,6 +73,17 @@ impl MmrPeaks {
         &self.peaks
     }

+    /// Returns the peak by the provided index.
+    ///
+    /// # Errors
+    /// Returns an error if the provided peak index is greater or equal to the current number of
+    /// peaks in the Mmr.
+    pub fn get_peak(&self, peak_idx: usize) -> Result<&RpoDigest, MmrError> {
+        self.peaks
+            .get(peak_idx)
+            .ok_or(MmrError::PeakOutOfBounds { peak_idx, peaks_len: self.peaks.len() })
+    }
+
     /// Converts this [MmrPeaks] into its components: number of leaves and a vector of peaks of
     /// the underlying MMR.
     pub fn into_parts(self) -> (usize, Vec<RpoDigest>) {
@@ -83,9 +99,18 @@ impl MmrPeaks {
         Rpo256::hash_elements(&self.flatten_and_pad_peaks())
     }

-    pub fn verify(&self, value: RpoDigest, opening: MmrProof) -> bool {
-        let root = &self.peaks[opening.peak_index()];
-        opening.merkle_path.verify(opening.relative_pos() as u64, value, root)
+    /// Verifies the Merkle opening proof.
+    ///
+    /// # Errors
+    /// Returns an error if:
+    /// - provided opening proof is invalid.
+    /// - Mmr root value computed using the provided leaf value differs from the actual one.
+    pub fn verify(&self, value: RpoDigest, opening: MmrProof) -> Result<(), MmrError> {
+        let root = self.get_peak(opening.peak_index())?;
+        opening
+            .merkle_path
+            .verify(opening.relative_pos() as u64, value, root)
+            .map_err(MmrError::InvalidMerklePath)
     }

     /// Flattens and pads the peaks to make hashing inside of the Miden VM easier.
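Note: with `verify` now returning `Result<(), MmrError>`, call sites trade `assert!(...)` for `?` or `unwrap()`, and an out-of-range peak index surfaces as a `PeakOutOfBounds` error through `get_peak` instead of panicking on a slice access. A hedged call-site sketch, assuming an `mmr` and a `leaf` value are in scope:

    // before: assert!(peaks.verify(leaf, proof), "...");
    // after: failures carry a reason
    let proof = mmr.open(2)?;
    mmr.peaks().verify(leaf, proof)?; // Err(MmrError::...) explains any mismatch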
@@ -94,16 +119,15 @@ impl MmrPeaks {
     /// - Flatten the vector of Words into a vector of Felts.
     /// - Pad the peaks with ZERO to an even number of words, this removes the need to handle RPO
     ///   padding.
-    /// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of
-    ///   hashing.
+    /// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of hashing.
     pub fn flatten_and_pad_peaks(&self) -> Vec<Felt> {
         let num_peaks = self.peaks.len();

         // To achieve the padding rules above we calculate the length of the final vector.
         // This is calculated as the number of field elements. Each peak is 4 field elements.
         // The length is calculated as follows:
-        // - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires
-        //   64 field elements.
+        // - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires 64
+        //   field elements.
         // - If there are more than 16 peaks and the number of peaks is odd, the data is padded to
        //   an even number of peaks and as such requires `(num_peaks + 1) * 4` field elements.
         // - If there are more than 16 peaks and the number of peaks is even, the data is not padded

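Note: the padding rules in that comment pin down the output length of `flatten_and_pad_peaks` exactly; a hedged restatement as an illustrative helper (not crate API), with 4 felts per peak:

    fn padded_len(num_peaks: usize) -> usize {
        if num_peaks < 16 {
            64 // padded up to 16 peaks
        } else if num_peaks % 2 == 1 {
            (num_peaks + 1) * 4 // padded to an even number of peaks
        } else {
            num_peaks * 4 // already even, no padding needed
        }
    }
    // e.g. padded_len(3) == 64, padded_len(17) == 72, padded_len(18) == 72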
@@ -1,3 +1,5 @@
+use alloc::vec::Vec;
+
 use super::{
     super::{InnerNodeInfo, Rpo256, RpoDigest},
     bit::TrueBitPositionIterator,
@@ -8,7 +10,6 @@ use crate::{
     merkle::{int_to_node, InOrderIndex, MerklePath, MerkleTree, MmrProof, NodeIndex},
     Felt, Word,
 };
-use alloc::vec::Vec;

 #[test]
 fn test_position_equal_or_higher_than_leafs_is_never_contained() {
@@ -138,7 +139,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 1);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 1);
     assert_eq!(acc.peaks(), &[postorder[0]]);

@@ -147,7 +148,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 3);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 2);
     assert_eq!(acc.peaks(), &[postorder[2]]);

@@ -156,7 +157,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 4);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 3);
     assert_eq!(acc.peaks(), &[postorder[2], postorder[3]]);

@@ -165,7 +166,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 7);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 4);
     assert_eq!(acc.peaks(), &[postorder[6]]);

@@ -174,7 +175,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 8);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 5);
     assert_eq!(acc.peaks(), &[postorder[6], postorder[7]]);

@@ -183,7 +184,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 10);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 6);
     assert_eq!(acc.peaks(), &[postorder[6], postorder[9]]);

@@ -192,7 +193,7 @@ fn test_mmr_simple() {
     assert_eq!(mmr.nodes.len(), 11);
     assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     assert_eq!(acc.num_leaves(), 7);
     assert_eq!(acc.peaks(), &[postorder[6], postorder[9], postorder[10]]);
 }
@@ -204,97 +205,73 @@ fn test_mmr_open() {
     let h23 = merge(LEAVES[2], LEAVES[3]);

     // node at pos 7 is the root
-    assert!(
-        mmr.open(7, mmr.forest()).is_err(),
-        "Element 7 is not in the tree, result should be None"
-    );
+    assert!(mmr.open(7).is_err(), "Element 7 is not in the tree, result should be None");

     // node at pos 6 is the root
     let empty: MerklePath = MerklePath::new(vec![]);
     let opening = mmr
-        .open(6, mmr.forest())
+        .open(6)
         .expect("Element 6 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, empty);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 6);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[6], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[6], opening).unwrap();

     // nodes 4,5 are depth 1
     let root_to_path = MerklePath::new(vec![LEAVES[4]]);
     let opening = mmr
-        .open(5, mmr.forest())
+        .open(5)
         .expect("Element 5 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 5);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[5], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[5], opening).unwrap();

     let root_to_path = MerklePath::new(vec![LEAVES[5]]);
     let opening = mmr
-        .open(4, mmr.forest())
+        .open(4)
         .expect("Element 4 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 4);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[4], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[4], opening).unwrap();

     // nodes 0,1,2,3 are detph 2
     let root_to_path = MerklePath::new(vec![LEAVES[2], h01]);
     let opening = mmr
-        .open(3, mmr.forest())
+        .open(3)
         .expect("Element 3 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 3);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[3], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[3], opening).unwrap();

     let root_to_path = MerklePath::new(vec![LEAVES[3], h01]);
     let opening = mmr
-        .open(2, mmr.forest())
+        .open(2)
         .expect("Element 2 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 2);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[2], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[2], opening).unwrap();

     let root_to_path = MerklePath::new(vec![LEAVES[0], h23]);
     let opening = mmr
-        .open(1, mmr.forest())
+        .open(1)
         .expect("Element 1 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 1);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[1], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[1], opening).unwrap();

     let root_to_path = MerklePath::new(vec![LEAVES[1], h23]);
     let opening = mmr
-        .open(0, mmr.forest())
+        .open(0)
         .expect("Element 0 is contained in the tree, expected an opening result.");
     assert_eq!(opening.merkle_path, root_to_path);
     assert_eq!(opening.forest, mmr.forest);
     assert_eq!(opening.position, 0);
-    assert!(
-        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[0], opening),
-        "MmrProof should be valid for the current accumulator."
-    );
+    mmr.peaks().verify(LEAVES[0], opening).unwrap();
 }

 #[test]
@@ -308,7 +285,7 @@ fn test_mmr_open_older_version() {
     // merkle path of a node is empty if there are no elements to pair with it
     for pos in (0..mmr.forest()).filter(is_even) {
         let forest = pos + 1;
-        let proof = mmr.open(pos, forest).unwrap();
+        let proof = mmr.open_at(pos, forest).unwrap();
         assert_eq!(proof.forest, forest);
         assert_eq!(proof.merkle_path.nodes(), []);
         assert_eq!(proof.position, pos);
@@ -320,7 +297,7 @@ fn test_mmr_open_older_version() {
     for pos in 0..4 {
         let idx = NodeIndex::new(2, pos).unwrap();
         let path = mtree.get_path(idx).unwrap();
-        let proof = mmr.open(pos as usize, forest).unwrap();
+        let proof = mmr.open_at(pos as usize, forest).unwrap();
         assert_eq!(path, proof.merkle_path);
     }
 }
@@ -331,7 +308,7 @@ fn test_mmr_open_older_version() {
     let path = mtree.get_path(idx).unwrap();
     // account for the bigger tree with 4 elements
     let mmr_pos = (pos + 4) as usize;
-    let proof = mmr.open(mmr_pos, forest).unwrap();
+    let proof = mmr.open_at(mmr_pos, forest).unwrap();
     assert_eq!(path, proof.merkle_path);
 }
 }

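Note: the `open_at` hunks above pair with the `peaks_at` hunks further down: a proof opened against an older forest should verify against the peaks of that same forest, not the current one. A hedged sketch reusing the `LEAVES` fixture from these tests:

    let mmr: Mmr = LEAVES.into();
    // open leaf 2 as of the 4-leaf version of the accumulator
    let proof = mmr.open_at(2, 4).unwrap();
    assert_eq!(proof.forest, 4);
    mmr.peaks_at(4).unwrap().verify(LEAVES[2], proof).unwrap();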
@@ -357,49 +334,49 @@ fn test_mmr_open_eight() {
     let root = mtree.root();

     let position = 0;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 1;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 2;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 3;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 4;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 5;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 6;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

     let position = 7;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);
@@ -415,47 +392,47 @@ fn test_mmr_open_seven() {
     let mmr: Mmr = LEAVES.into();

     let position = 0;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath =
         mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(0, LEAVES[0]).unwrap(), mtree1.root());

     let position = 1;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath =
         mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(1, LEAVES[1]).unwrap(), mtree1.root());

     let position = 2;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath =
         mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(2, LEAVES[2]).unwrap(), mtree1.root());

     let position = 3;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath =
         mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(3, LEAVES[3]).unwrap(), mtree1.root());

     let position = 4;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 0u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(0, LEAVES[4]).unwrap(), mtree2.root());

     let position = 5;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 1u64).unwrap()).unwrap();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(1, LEAVES[5]).unwrap(), mtree2.root());

     let position = 6;
-    let proof = mmr.open(position, mmr.forest()).unwrap();
+    let proof = mmr.open(position).unwrap();
     let merkle_path: MerklePath = [].as_ref().into();
     assert_eq!(proof, MmrProof { forest, position, merkle_path });
     assert_eq!(proof.merkle_path.compute_root(0, LEAVES[6]).unwrap(), LEAVES[6]);
@@ -479,7 +456,7 @@ fn test_mmr_invariants() {
     let mut mmr = Mmr::new();
     for v in 1..=1028 {
         mmr.add(int_to_node(v));
-        let accumulator = mmr.peaks(mmr.forest()).unwrap();
+        let accumulator = mmr.peaks();
         assert_eq!(v as usize, mmr.forest(), "MMR leaf count must increase by one on every add");
         assert_eq!(
             v as usize,
@@ -565,37 +542,37 @@ fn test_mmr_peaks() {
     let mmr: Mmr = LEAVES.into();

     let forest = 0b0001;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[0]]);

     let forest = 0b0010;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[2]]);

     let forest = 0b0011;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[2], mmr.nodes[3]]);

     let forest = 0b0100;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[6]]);

     let forest = 0b0101;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[7]]);

     let forest = 0b0110;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[9]]);

     let forest = 0b0111;
-    let acc = mmr.peaks(forest).unwrap();
+    let acc = mmr.peaks_at(forest).unwrap();
     assert_eq!(acc.num_leaves(), forest);
     assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[9], mmr.nodes[10]]);
 }
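Note: a property implicit in `test_mmr_peaks` is that the forest value is the leaf count, so the number of peaks returned by `peaks_at` is its popcount. A hedged generalization of the assertions above, reusing the `mmr` from this test:

    for forest in 1..=7usize {
        let acc = mmr.peaks_at(forest).unwrap();
        assert_eq!(acc.num_leaves(), forest);
        // one peak per set bit in the forest
        assert_eq!(acc.peaks().len(), forest.count_ones() as usize);
    }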
@@ -603,7 +580,7 @@ fn test_mmr_peaks() {
 #[test]
 fn test_mmr_hash_peaks() {
     let mmr: Mmr = LEAVES.into();
-    let peaks = mmr.peaks(mmr.forest()).unwrap();
+    let peaks = mmr.peaks();

     let first_peak = Rpo256::merge(&[
         Rpo256::merge(&[LEAVES[0], LEAVES[1]]),
@@ -657,7 +634,7 @@ fn test_mmr_peaks_hash_odd() {
 #[test]
 fn test_mmr_delta() {
     let mmr: Mmr = LEAVES.into();
-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();

     // original_forest can't have more elements
     assert!(
@@ -757,7 +734,7 @@ fn test_mmr_delta_old_forest() {
 #[test]
 fn test_partial_mmr_simple() {
     let mmr: Mmr = LEAVES.into();
-    let peaks = mmr.peaks(mmr.forest()).unwrap();
+    let peaks = mmr.peaks();
     let mut partial: PartialMmr = peaks.clone().into();

     // check initial state of the partial mmr
@@ -768,7 +745,7 @@ fn test_partial_mmr_simple() {
     assert_eq!(partial.nodes.len(), 0);

     // check state after adding tracking one element
-    let proof1 = mmr.open(0, mmr.forest()).unwrap();
+    let proof1 = mmr.open(0).unwrap();
     let el1 = mmr.get(proof1.position).unwrap();
     partial.track(proof1.position, el1, &proof1.merkle_path).unwrap();

@@ -780,7 +757,7 @@ fn test_partial_mmr_simple() {
     let idx = idx.parent();
     assert_eq!(partial.nodes[&idx.sibling()], proof1.merkle_path[1]);

-    let proof2 = mmr.open(1, mmr.forest()).unwrap();
+    let proof2 = mmr.open(1).unwrap();
     let el2 = mmr.get(proof2.position).unwrap();
     partial.track(proof2.position, el2, &proof2.merkle_path).unwrap();

@@ -798,9 +775,9 @@ fn test_partial_mmr_update_single() {
     let mut full = Mmr::new();
     let zero = int_to_node(0);
     full.add(zero);
-    let mut partial: PartialMmr = full.peaks(full.forest()).unwrap().into();
+    let mut partial: PartialMmr = full.peaks().into();

-    let proof = full.open(0, full.forest()).unwrap();
+    let proof = full.open(0).unwrap();
     partial.track(proof.position, zero, &proof.merkle_path).unwrap();

     for i in 1..100 {
@@ -810,9 +787,9 @@ fn test_partial_mmr_update_single() {
         partial.apply(delta).unwrap();

         assert_eq!(partial.forest(), full.forest());
-        assert_eq!(partial.peaks(), full.peaks(full.forest()).unwrap());
+        assert_eq!(partial.peaks(), full.peaks());

-        let proof1 = full.open(i as usize, full.forest()).unwrap();
+        let proof1 = full.open(i as usize).unwrap();
         partial.track(proof1.position, node, &proof1.merkle_path).unwrap();
         let proof2 = partial.open(proof1.position).unwrap().unwrap();
         assert_eq!(proof1.merkle_path, proof2.merkle_path);
@@ -822,7 +799,7 @@ fn test_partial_mmr_update_single() {
 #[test]
 fn test_mmr_add_invalid_odd_leaf() {
     let mmr: Mmr = LEAVES.into();
-    let acc = mmr.peaks(mmr.forest()).unwrap();
+    let acc = mmr.peaks();
     let mut partial: PartialMmr = acc.clone().into();

     let empty = MerklePath::new(Vec::new());
@@ -837,6 +814,39 @@ fn test_mmr_add_invalid_odd_leaf() {
     assert!(result.is_ok());
 }

+/// Tests that a proof whose peak count exceeds the peak count of the MMR returns an error.
+///
+/// Here we manipulate the proof to return a peak index of 1 while the MMR only has 1 peak (with
+/// index 0).
+#[test]
+#[should_panic]
+fn test_mmr_proof_num_peaks_exceeds_current_num_peaks() {
+    let mmr: Mmr = LEAVES[0..4].iter().cloned().into();
+    let mut proof = mmr.open(3).unwrap();
+    proof.forest = 5;
+    proof.position = 4;
+    mmr.peaks().verify(LEAVES[3], proof).unwrap();
+}
+
+/// Tests that a proof whose peak count exceeds the peak count of the MMR returns an error.
+///
+/// We create an MmrProof for a leaf whose peak index to verify against is 1.
+/// Then we add another leaf which results in an Mmr with just one peak due to trees
+/// being merged. If we try to use the old proof against the new Mmr, we should get an error.
+#[test]
+#[should_panic]
+fn test_mmr_old_proof_num_peaks_exceeds_current_num_peaks() {
+    let leaves_len = 3;
+    let mut mmr = Mmr::from(LEAVES[0..leaves_len].iter().cloned());
+
+    let leaf_idx = leaves_len - 1;
+    let proof = mmr.open(leaf_idx).unwrap();
+    assert!(mmr.peaks().verify(LEAVES[leaf_idx], proof.clone()).is_ok());
+
+    mmr.add(LEAVES[leaves_len]);
+    mmr.peaks().verify(LEAVES[leaf_idx], proof).unwrap();
+}
+
 mod property_tests {
     use proptest::prelude::*;


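Note: both new tests drive the error path through `unwrap()` plus `#[should_panic]`; the same invariant can be checked without panicking by inspecting the `Result` directly. A hedged sketch reusing the `LEAVES` fixture:

    let mut mmr = Mmr::from(LEAVES[0..3].iter().cloned());
    let proof = mmr.open(2).unwrap();
    assert!(mmr.peaks().verify(LEAVES[2], proof.clone()).is_ok());

    // adding a fourth leaf merges trees: 3 leaves (2 peaks) -> 4 leaves (1 peak),
    // so the old proof's peak index is now out of bounds
    mmr.add(LEAVES[3]);
    assert!(mmr.peaks().verify(LEAVES[2], proof).is_err());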
@@ -21,9 +21,11 @@ mod path;
 pub use path::{MerklePath, RootPath, ValuePath};

 mod smt;
+#[cfg(feature = "internal")]
+pub use smt::build_subtree_for_bench;
 pub use smt::{
-    LeafIndex, SimpleSmt, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError, SMT_DEPTH,
-    SMT_MAX_DEPTH, SMT_MIN_DEPTH,
+    LeafIndex, MutationSet, SimpleSmt, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError,
+    SubtreeLeaf, SMT_DEPTH, SMT_MAX_DEPTH, SMT_MIN_DEPTH,
 };

 mod mmr;

@@ -116,7 +116,7 @@ impl PartialMerkleTree {
         // depth of 63 because we consider passing in a vector of size 2^64 infeasible.
         let max = 2usize.pow(63);
         if layers.len() > max {
-            return Err(MerkleError::InvalidNumEntries(max));
+            return Err(MerkleError::TooManyEntries(max));
         }

         // Get maximum depth
@@ -147,11 +147,12 @@ impl PartialMerkleTree {
                 let index = NodeIndex::new(depth, index_value)?;

                 // get hash of the current node
-                let node = nodes.get(&index).ok_or(MerkleError::NodeNotInSet(index))?;
+                let node =
+                    nodes.get(&index).ok_or(MerkleError::NodeIndexNotFoundInTree(index))?;
                 // get hash of the sibling node
                 let sibling = nodes
                     .get(&index.sibling())
-                    .ok_or(MerkleError::NodeNotInSet(index.sibling()))?;
+                    .ok_or(MerkleError::NodeIndexNotFoundInTree(index.sibling()))?;
                 // get parent hash
                 let parent = Rpo256::merge(&index.build_node(*node, *sibling));

@@ -184,7 +185,10 @@ impl PartialMerkleTree {
     /// # Errors
     /// Returns an error if the specified NodeIndex is not contained in the nodes map.
     pub fn get_node(&self, index: NodeIndex) -> Result<RpoDigest, MerkleError> {
-        self.nodes.get(&index).ok_or(MerkleError::NodeNotInSet(index)).copied()
+        self.nodes
+            .get(&index)
+            .ok_or(MerkleError::NodeIndexNotFoundInTree(index))
+            .copied()
     }

     /// Returns true if provided index contains in the leaves set, false otherwise.
@@ -224,7 +228,7 @@ impl PartialMerkleTree {
         }

         if !self.nodes.contains_key(&index) {
-            return Err(MerkleError::NodeNotInSet(index));
+            return Err(MerkleError::NodeIndexNotFoundInTree(index));
         }

         let mut path = Vec::new();
@@ -335,15 +339,16 @@ impl PartialMerkleTree {
         if self.root() == EMPTY_DIGEST {
             self.nodes.insert(ROOT_INDEX, root);
         } else if self.root() != root {
-            return Err(MerkleError::ConflictingRoots([self.root(), root].to_vec()));
+            return Err(MerkleError::ConflictingRoots {
+                expected_root: self.root(),
+                actual_root: root,
+            });
         }

         Ok(())
     }

     /// Updates value of the leaf at the specified index returning the old leaf value.
-    /// By default the specified index is assumed to belong to the deepest layer. If the considered
-    /// node does not belong to the tree, the first node on the way to the root will be changed.
+    ///
+    /// By default the specified index is assumed to belong to the deepest layer. If the considered
+    /// node does not belong to the tree, the first node on the way to the root will be changed.
@@ -352,6 +357,7 @@ impl PartialMerkleTree {
     ///
     /// # Errors
     /// Returns an error if:
+    /// - No entry exists at the specified index.
     /// - The specified index is greater than the maximum number of nodes on the deepest layer.
     pub fn update_leaf(&mut self, index: u64, value: Word) -> Result<RpoDigest, MerkleError> {
         let mut node_index = NodeIndex::new(self.max_depth(), index)?;
@@ -367,7 +373,7 @@ impl PartialMerkleTree {
         let old_value = self
             .nodes
             .insert(node_index, value.into())
-            .ok_or(MerkleError::NodeNotInSet(node_index))?;
+            .ok_or(MerkleError::NodeIndexNotFoundInTree(node_index))?;

         // if the old value and new value are the same, there is nothing to update
         if value == *old_value {

@@ -1,3 +1,5 @@
+use alloc::{collections::BTreeMap, vec::Vec};
+
 use super::{
     super::{
         digests_to_words, int_to_node, DefaultMerkleStore as MerkleStore, MerkleTree, NodeIndex,
@@ -5,7 +7,6 @@ use super::{
     },
     Deserializable, InnerNodeInfo, RpoDigest, Serializable, ValuePath,
 };
-use alloc::{collections::BTreeMap, vec::Vec};

 // TEST DATA
 // ================================================================================================
@@ -294,7 +295,8 @@ fn leaves() {
     assert!(expected_leaves.eq(pmt.leaves()));
 }

-/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected ones.
+/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected
+/// ones.
 #[test]
 fn test_inner_node_iterator() {
     let mt = MerkleTree::new(digests_to_words(&VALUES8)).unwrap();

@@ -54,12 +54,20 @@ impl MerklePath {

     /// Verifies the Merkle opening proof towards the provided root.
     ///
-    /// Returns `true` if `node` exists at `index` in a Merkle tree with `root`.
-    pub fn verify(&self, index: u64, node: RpoDigest, root: &RpoDigest) -> bool {
-        match self.compute_root(index, node) {
-            Ok(computed_root) => root == &computed_root,
-            Err(_) => false,
+    /// # Errors
+    /// Returns an error if:
+    /// - provided node index is invalid.
+    /// - root calculated during the verification differs from the provided one.
+    pub fn verify(&self, index: u64, node: RpoDigest, root: &RpoDigest) -> Result<(), MerkleError> {
+        let computed_root = self.compute_root(index, node)?;
+        if &computed_root != root {
+            return Err(MerkleError::ConflictingRoots {
+                expected_root: *root,
+                actual_root: computed_root,
+            });
         }
+
+        Ok(())
     }

     /// Returns an iterator over every inner node of this [MerklePath].
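Note: callers that previously branched on the `bool` now get a structured error out of `MerklePath::verify`. A hedged call-site sketch (the function name is illustrative; the error variant fields are as shown in this diff):

    fn check(path: &MerklePath, index: u64, node: RpoDigest, root: &RpoDigest) {
        match path.verify(index, node, root) {
            Ok(()) => (),
            Err(MerkleError::ConflictingRoots { expected_root, actual_root }) => {
                // the caller's root and the root recomputed from the path disagree
                panic!("root mismatch: {:?} != {:?}", expected_root, actual_root);
            },
            Err(err) => panic!("invalid index or malformed path: {err}"),
        }
    }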
@@ -143,7 +151,7 @@ pub struct InnerNodeIterator<'a> {
     value: RpoDigest,
 }

-impl<'a> Iterator for InnerNodeIterator<'a> {
+impl Iterator for InnerNodeIterator<'_> {
     type Item = InnerNodeInfo;

     fn next(&mut self) -> Option<Self::Item> {

@@ -1,86 +1,39 @@
-use alloc::vec::Vec;
-use core::fmt;
+use thiserror::Error;

 use crate::{
     hash::rpo::RpoDigest,
     merkle::{LeafIndex, SMT_DEPTH},
     Word,
 };

 // SMT LEAF ERROR
 // =================================================================================================

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Debug, Error)]
 pub enum SmtLeafError {
-    InconsistentKeys {
-        entries: Vec<(RpoDigest, Word)>,
-        key_1: RpoDigest,
-        key_2: RpoDigest,
-    },
-    InvalidNumEntriesForMultiple(usize),
-    SingleKeyInconsistentWithLeafIndex {
+    #[error(
+        "multiple leaf requires all keys to map to the same leaf index but key1 {key_1} and key2 {key_2} map to different indices"
+    )]
+    InconsistentMultipleLeafKeys { key_1: RpoDigest, key_2: RpoDigest },
+    #[error("single leaf key {key} maps to {actual_leaf_index:?} but was expected to map to {expected_leaf_index:?}")]
+    InconsistentSingleLeafIndices {
         key: RpoDigest,
-        leaf_index: LeafIndex<SMT_DEPTH>,
+        expected_leaf_index: LeafIndex<SMT_DEPTH>,
+        actual_leaf_index: LeafIndex<SMT_DEPTH>,
     },
-    MultipleKeysInconsistentWithLeafIndex {
+    #[error("supplied leaf index {leaf_index_supplied:?} does not match {leaf_index_from_keys:?} for multiple leaf")]
+    InconsistentMultipleLeafIndices {
         leaf_index_from_keys: LeafIndex<SMT_DEPTH>,
         leaf_index_supplied: LeafIndex<SMT_DEPTH>,
     },
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for SmtLeafError {}
-
-impl fmt::Display for SmtLeafError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        use SmtLeafError::*;
-        match self {
-            InvalidNumEntriesForMultiple(num_entries) => {
-                write!(f, "Multiple leaf requires 2 or more entries. Got: {num_entries}")
-            }
-            InconsistentKeys { entries, key_1, key_2 } => {
-                write!(f, "Multiple leaf requires all keys to map to the same leaf index. Offending keys: {key_1} and {key_2}. Entries: {entries:?}.")
-            }
-            SingleKeyInconsistentWithLeafIndex { key, leaf_index } => {
-                write!(
-                    f,
-                    "Single key in leaf inconsistent with leaf index. Key: {key}, leaf index: {}",
-                    leaf_index.value()
-                )
-            }
-            MultipleKeysInconsistentWithLeafIndex {
-                leaf_index_from_keys,
-                leaf_index_supplied,
-            } => {
-                write!(
-                    f,
-                    "Keys in entries map to leaf index {}, but leaf index {} was supplied",
-                    leaf_index_from_keys.value(),
-                    leaf_index_supplied.value()
-                )
-            }
-        }
-    }
+    #[error("multiple leaf requires at least two entries but only {0} were given")]
+    MultipleLeafRequiresTwoEntries(usize),
 }

 // SMT PROOF ERROR
 // =================================================================================================

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Debug, Error)]
 pub enum SmtProofError {
-    InvalidPathLength(usize),
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for SmtProofError {}
-
-impl fmt::Display for SmtProofError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        use SmtProofError::*;
-        match self {
-            InvalidPathLength(path_length) => {
-                write!(f, "Invalid Merkle path length. Expected {SMT_DEPTH}, got {path_length}")
-            }
-        }
-    }
+    #[error("merkle path length {0} does not match SMT depth {SMT_DEPTH}")]
+    InvalidMerklePathLength(usize),
 }

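Note: the manual `Display` and feature-gated `std::error::Error` impls collapse into `thiserror` derives. The pattern, sketched on a hypothetical error type (not part of this crate):

    use thiserror::Error;

    // #[error(...)] generates the Display impl from the format string;
    // derive(Error) supplies the error-trait plumbing previously written by hand
    #[derive(Debug, Error)]
    pub enum ExampleError {
        #[error("merkle path length {0} does not match expected depth {1}")]
        InvalidPathLength(usize, u8),
    }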
@@ -20,8 +20,8 @@ impl SmtLeaf {
     ///
     /// # Errors
     /// - Returns an error if 2 keys in `entries` map to a different leaf index
-    /// - Returns an error if 1 or more keys in `entries` map to a leaf index
-    ///   different from `leaf_index`
+    /// - Returns an error if 1 or more keys in `entries` map to a leaf index different from
+    ///   `leaf_index`
     pub fn new(
         entries: Vec<(RpoDigest, Word)>,
         leaf_index: LeafIndex<SMT_DEPTH>,
@@ -31,29 +31,31 @@ impl SmtLeaf {
             1 => {
                 let (key, value) = entries[0];

-                if LeafIndex::<SMT_DEPTH>::from(key) != leaf_index {
-                    return Err(SmtLeafError::SingleKeyInconsistentWithLeafIndex {
+                let computed_index = LeafIndex::<SMT_DEPTH>::from(key);
+                if computed_index != leaf_index {
+                    return Err(SmtLeafError::InconsistentSingleLeafIndices {
                         key,
-                        leaf_index,
+                        expected_leaf_index: leaf_index,
+                        actual_leaf_index: computed_index,
                     });
                 }

                 Ok(Self::new_single(key, value))
-            }
+            },
             _ => {
                 let leaf = Self::new_multiple(entries)?;

                 // `new_multiple()` checked that all keys map to the same leaf index. We still need
                 // to ensure that that leaf index is `leaf_index`.
                 if leaf.index() != leaf_index {
-                    Err(SmtLeafError::MultipleKeysInconsistentWithLeafIndex {
+                    Err(SmtLeafError::InconsistentMultipleLeafIndices {
                         leaf_index_from_keys: leaf.index(),
                         leaf_index_supplied: leaf_index,
                     })
                 } else {
                     Ok(leaf)
                 }
-            }
+            },
         }
     }

@@ -75,7 +77,7 @@ impl SmtLeaf {
     /// - Returns an error if 2 keys in `entries` map to a different leaf index
     pub fn new_multiple(entries: Vec<(RpoDigest, Word)>) -> Result<Self, SmtLeafError> {
         if entries.len() < 2 {
-            return Err(SmtLeafError::InvalidNumEntriesForMultiple(entries.len()));
+            return Err(SmtLeafError::MultipleLeafRequiresTwoEntries(entries.len()));
         }

         // Check that all keys map to the same leaf index
@@ -89,8 +91,7 @@ impl SmtLeaf {
             let next_leaf_index: LeafIndex<SMT_DEPTH> = next_key.into();

             if next_leaf_index != first_leaf_index {
-                return Err(SmtLeafError::InconsistentKeys {
-                    entries,
+                return Err(SmtLeafError::InconsistentMultipleLeafKeys {
                     key_1: first_key,
                     key_2: next_key,
                 });
@@ -118,7 +119,7 @@ impl SmtLeaf {
                 // Note: All keys are guaranteed to have the same leaf index
                 let (first_key, _) = entries[0];
                 first_key.into()
-            }
+            },
         }
     }

@@ -129,7 +130,7 @@ impl SmtLeaf {
             SmtLeaf::Single(_) => 1,
             SmtLeaf::Multiple(entries) => {
                 entries.len().try_into().expect("shouldn't have more than 2^64 entries")
-            }
+            },
         }
     }

@@ -141,7 +142,7 @@ impl SmtLeaf {
             SmtLeaf::Multiple(kvs) => {
                 let elements: Vec<Felt> = kvs.iter().copied().flat_map(kv_to_elements).collect();
                 Rpo256::hash_elements(&elements)
-            }
+            },
         }
     }

@@ -182,7 +183,8 @@ impl SmtLeaf {
     // HELPERS
     // ---------------------------------------------------------------------------------------------

-    /// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another leaf.
+    /// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another
+    /// leaf.
     pub(super) fn get_value(&self, key: &RpoDigest) -> Option<Word> {
         // Ensure that `key` maps to this leaf
         if self.index() != key.into() {
@@ -197,7 +199,7 @@ impl SmtLeaf {
                 } else {
                     Some(EMPTY_WORD)
                 }
-            }
+            },
             SmtLeaf::Multiple(kv_pairs) => {
                 for (key_in_leaf, value_in_leaf) in kv_pairs {
                     if key == key_in_leaf {
@@ -206,7 +208,7 @@ impl SmtLeaf {
                 }

                 Some(EMPTY_WORD)
-            }
+            },
         }
     }

@@ -219,7 +221,7 @@ impl SmtLeaf {
             SmtLeaf::Empty(_) => {
                 *self = SmtLeaf::new_single(key, value);
                 None
-            }
+            },
             SmtLeaf::Single(kv_pair) => {
                 if kv_pair.0 == key {
                     // the key is already in this leaf. Update the value and return the previous
@@ -237,7 +239,7 @@ impl SmtLeaf {

                     None
                 }
-            }
+            },
             SmtLeaf::Multiple(kv_pairs) => {
                 match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
                     Ok(pos) => {
@@ -245,14 +247,14 @@ impl SmtLeaf {
                         kv_pairs[pos].1 = value;

                         Some(old_value)
-                    }
+                    },
                     Err(pos) => {
                         kv_pairs.insert(pos, (key, value));

                         None
-                    }
+                    },
                 }
-            }
+            },
         }
     }

@@ -277,7 +279,7 @@ impl SmtLeaf {
                     // another key is stored at leaf; nothing to update
                     (None, false)
                 }
-            }
+            },
             SmtLeaf::Multiple(kv_pairs) => {
                 match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
                     Ok(pos) => {
@@ -292,13 +294,13 @@ impl SmtLeaf {
                         }

                         (Some(old_value), false)
-                    }
+                    },
                     Err(_) => {
                         // other keys are stored at leaf; nothing to update
                         (None, false)
-                    }
+                    },
                 }
-            }
+            },
         }
     }
 }
@@ -349,7 +351,7 @@ impl Deserializable for SmtLeaf {
 // ================================================================================================

 /// Converts a key-value tuple to an iterator of `Felt`s
-fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt> {
+pub(crate) fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt> {
     let key_elements = key.into_iter();
     let value_elements = value.into_iter();

@@ -358,7 +360,7 @@ fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt>

 /// Compares two keys, compared element-by-element using their integer representations starting with
 /// the most significant element.
-fn cmp_keys(key_1: RpoDigest, key_2: RpoDigest) -> Ordering {
+pub(crate) fn cmp_keys(key_1: RpoDigest, key_2: RpoDigest) -> Ordering {
     for (v1, v2) in key_1.iter().zip(key_2.iter()).rev() {
         let v1 = v1.as_int();
         let v2 = v2.as_int();

@@ -1,13 +1,14 @@
-use super::{
-    EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
-    NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
-};
 use alloc::{
     collections::{BTreeMap, BTreeSet},
     string::ToString,
     vec::Vec,
 };

+use super::{
+    EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
+    MutationSet, NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
+};
+
 mod error;
 pub use error::{SmtLeafError, SmtProofError};

@@ -32,8 +33,8 @@ pub const SMT_DEPTH: u8 = 64;
 /// Sparse Merkle tree mapping 256-bit keys to 256-bit values. Both keys and values are represented
 /// by 4 field elements.
 ///
-/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf to
-/// which the key maps.
+/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf
+/// to which the key maps.
 ///
 /// A leaf is either empty, or holds one or more key-value pairs. An empty leaf hashes to the empty
 /// word. Otherwise, a leaf hashes to the hash of its key-value pairs, ordered by key first, value
@@ -70,12 +71,51 @@ impl Smt {

     /// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
     ///
+    /// If the `concurrent` feature is enabled, this function uses a parallel implementation to
+    /// process the entries efficiently, otherwise it defaults to the sequential implementation.
+    ///
     /// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
     ///
     /// # Errors
     /// Returns an error if the provided entries contain multiple values for the same key.
     pub fn with_entries(
+        entries: impl IntoIterator<Item = (RpoDigest, Word)>,
+    ) -> Result<Self, MerkleError> {
+        #[cfg(feature = "concurrent")]
+        {
+            let mut seen_keys = BTreeSet::new();
+            let entries: Vec<_> = entries
+                .into_iter()
+                .map(|(key, value)| {
+                    if seen_keys.insert(key) {
+                        Ok((key, value))
+                    } else {
+                        Err(MerkleError::DuplicateValuesForIndex(
+                            LeafIndex::<SMT_DEPTH>::from(key).value(),
+                        ))
+                    }
+                })
+                .collect::<Result<_, _>>()?;
+            if entries.is_empty() {
+                return Ok(Self::default());
+            }
+            <Self as SparseMerkleTree<SMT_DEPTH>>::with_entries_par(entries)
+        }
+        #[cfg(not(feature = "concurrent"))]
+        {
+            Self::with_entries_sequential(entries)
+        }
+    }
+
+    /// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
+    ///
+    /// This sequential implementation processes entries one at a time to build the tree.
+    /// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
+    ///
+    /// # Errors
+    /// Returns an error if the provided entries contain multiple values for the same key.
+    pub fn with_entries_sequential(
         entries: impl IntoIterator<Item = (RpoDigest, Word)>,
     ) -> Result<Self, MerkleError> {
         // create an empty tree
         let mut tree = Self::new();
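Note: a hedged usage sketch of the constructor; duplicate keys are rejected up front regardless of which implementation runs, and whether the parallel or sequential path is taken is an internal detail of the `concurrent` feature:

    use miden_crypto::merkle::Smt;
    use miden_crypto::{hash::rpo::RpoDigest, Word, ONE};

    let key = RpoDigest::from([ONE, ONE, ONE, ONE]);
    let value: Word = [ONE, ONE, ONE, ONE];
    let other: Word = [ONE, ONE, ONE, 2u32.into()];

    // a single entry builds fine...
    let smt = Smt::with_entries([(key, value)]).unwrap();
    assert!(!smt.is_empty());

    // ...but repeating a key is rejected
    assert!(Smt::with_entries([(key, value), (key, other)]).is_err());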
@@ -100,6 +140,23 @@ impl Smt {
         Ok(tree)
     }

+    /// Returns a new [`Smt`] instantiated from already computed leaves and nodes.
+    ///
+    /// This function performs minimal consistency checking. It is the caller's responsibility to
+    /// ensure the passed arguments are correct and consistent with each other.
+    ///
+    /// # Panics
+    /// With debug assertions on, this function panics if `root` does not match the root node in
+    /// `inner_nodes`.
+    pub fn from_raw_parts(
+        inner_nodes: BTreeMap<NodeIndex, InnerNode>,
+        leaves: BTreeMap<u64, SmtLeaf>,
+        root: RpoDigest,
+    ) -> Self {
+        // Our particular implementation of `from_raw_parts()` never returns `Err`.
+        <Self as SparseMerkleTree<SMT_DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
+    }
+
     // PUBLIC ACCESSORS
     // --------------------------------------------------------------------------------------------

@@ -120,12 +177,7 @@ impl Smt {

     /// Returns the value associated with `key`
     pub fn get_value(&self, key: &RpoDigest) -> Word {
-        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
-
-        match self.leaves.get(&leaf_pos) {
-            Some(leaf) => leaf.get_value(key).unwrap_or_default(),
-            None => EMPTY_WORD,
-        }
+        <Self as SparseMerkleTree<SMT_DEPTH>>::get_value(self, key)
     }

     /// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
@@ -134,6 +186,12 @@ impl Smt {
         <Self as SparseMerkleTree<SMT_DEPTH>>::open(self, key)
     }

+    /// Returns a boolean value indicating whether the SMT is empty.
+    pub fn is_empty(&self) -> bool {
+        debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
+        self.root == Self::EMPTY_ROOT
+    }
+
     // ITERATORS
     // --------------------------------------------------------------------------------------------

@@ -171,6 +229,47 @@ impl Smt {
         <Self as SparseMerkleTree<SMT_DEPTH>>::insert(self, key, value)
     }

+    /// Computes what changes are necessary to insert the specified key-value pairs into this
+    /// Merkle tree, allowing for validation before applying those changes.
+    ///
+    /// This method returns a [`MutationSet`], which contains all the information for inserting
+    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
+    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
+    /// [`Smt::apply_mutations()`] can be called in order to commit these changes to the Merkle
+    /// tree, or [`drop()`] to discard them.
+    ///
+    /// # Example
+    /// ```
+    /// # use miden_crypto::{hash::rpo::RpoDigest, Felt, Word};
+    /// # use miden_crypto::merkle::{Smt, EmptySubtreeRoots, SMT_DEPTH};
+    /// let mut smt = Smt::new();
+    /// let pair = (RpoDigest::default(), Word::default());
+    /// let mutations = smt.compute_mutations(vec![pair]);
+    /// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
+    /// smt.apply_mutations(mutations);
+    /// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
+    /// ```
+    pub fn compute_mutations(
+        &self,
+        kv_pairs: impl IntoIterator<Item = (RpoDigest, Word)>,
+    ) -> MutationSet<SMT_DEPTH, RpoDigest, Word> {
+        <Self as SparseMerkleTree<SMT_DEPTH>>::compute_mutations(self, kv_pairs)
+    }
+
+    /// Apply the prospective mutations computed with [`Smt::compute_mutations()`] to this tree.
+    ///
+    /// # Errors
+    /// If `mutations` was computed on a tree with a different root than this one, returns
+    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
+    /// the `mutations` were computed against, and the second item is the actual current root of
+    /// this tree.
+    pub fn apply_mutations(
+        &mut self,
+        mutations: MutationSet<SMT_DEPTH, RpoDigest, Word>,
+    ) -> Result<(), MerkleError> {
+        <Self as SparseMerkleTree<SMT_DEPTH>>::apply_mutations(self, mutations)
+    }
+
     // HELPERS
     // --------------------------------------------------------------------------------------------

@@ -187,7 +286,7 @@ impl Smt {
                 self.leaves.insert(leaf_index.value(), SmtLeaf::Single((key, value)));

                 None
-            }
+            },
         }
     }

@@ -215,6 +314,20 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
     type Opening = SmtProof;

     const EMPTY_VALUE: Self::Value = EMPTY_WORD;
+    const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
+
+    fn from_raw_parts(
+        inner_nodes: BTreeMap<NodeIndex, InnerNode>,
+        leaves: BTreeMap<u64, SmtLeaf>,
+        root: RpoDigest,
+    ) -> Result<Self, MerkleError> {
+        if cfg!(debug_assertions) {
+            let root_node = inner_nodes.get(&NodeIndex::root()).unwrap();
+            assert_eq!(root_node.hash(), root);
+        }
+
+        Ok(Self { root, inner_nodes, leaves })
+    }

     fn root(&self) -> RpoDigest {
         self.root
@@ -225,11 +338,10 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
     }

     fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
-        self.inner_nodes.get(&index).cloned().unwrap_or_else(|| {
-            let node = EmptySubtreeRoots::entry(SMT_DEPTH, index.depth() + 1);
-
-            InnerNode { left: *node, right: *node }
-        })
+        self.inner_nodes
+            .get(&index)
+            .cloned()
+            .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()))
     }

     fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) {
@@ -249,6 +361,15 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
         }
     }

+    fn get_value(&self, key: &Self::Key) -> Self::Value {
+        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
+
+        match self.leaves.get(&leaf_pos) {
+            Some(leaf) => leaf.get_value(key).unwrap_or_default(),
+            None => EMPTY_WORD,
+        }
+    }
+
     fn get_leaf(&self, key: &RpoDigest) -> Self::Leaf {
         let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();

@@ -262,6 +383,28 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
         leaf.hash()
     }

+    fn construct_prospective_leaf(
+        &self,
+        mut existing_leaf: SmtLeaf,
+        key: &RpoDigest,
+        value: &Word,
+    ) -> SmtLeaf {
+        debug_assert_eq!(existing_leaf.index(), Self::key_to_leaf_index(key));
+
+        match existing_leaf {
+            SmtLeaf::Empty(_) => SmtLeaf::new_single(*key, *value),
+            _ => {
+                if *value != EMPTY_WORD {
+                    existing_leaf.insert(*key, *value);
+                } else {
+                    existing_leaf.remove(*key);
+                }
+
+                existing_leaf
+            },
+        }
+    }
+
     fn key_to_leaf_index(key: &RpoDigest) -> LeafIndex<SMT_DEPTH> {
         let most_significant_felt = key[3];
         LeafIndex::new_max_depth(most_significant_felt.as_int())
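Note: the `key_to_leaf_index` context above is the whole story of key placement: `key[3]`, the most significant element, interpreted as an integer, is the depth-64 leaf index. A hedged check mirroring the test fixtures later in this diff (assumes `RpoDigest`, `Felt`, `ONE`, `LeafIndex`, and `SMT_DEPTH` in scope):

    let raw = 0b_0110_u64;
    let key = RpoDigest::from([ONE, ONE, ONE, Felt::new(raw)]);
    assert_eq!(LeafIndex::<SMT_DEPTH>::from(key).value(), raw);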
@@ -270,6 +413,23 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
    fn path_and_leaf_to_opening(path: MerklePath, leaf: SmtLeaf) -> SmtProof {
        SmtProof::new_unchecked(path, leaf)
    }

    fn pairs_to_leaf(mut pairs: Vec<(RpoDigest, Word)>) -> SmtLeaf {
        assert!(!pairs.is_empty());

        if pairs.len() > 1 {
            SmtLeaf::new_multiple(pairs).unwrap()
        } else {
            let (key, value) = pairs.pop().unwrap();
            // TODO: should we ever be constructing empty leaves from pairs?
            if value == Self::EMPTY_VALUE {
                let index = Self::key_to_leaf_index(&key);
                SmtLeaf::new_empty(index)
            } else {
                SmtLeaf::new_single(key, value)
            }
        }
    }
}

impl Default for Smt {
@@ -314,6 +474,14 @@ impl Serializable for Smt {
            target.write(value);
        }
    }

    fn get_size_hint(&self) -> usize {
        let entries_count = self.entries().count();

        // Each entry is the size of a digest plus a word.
        entries_count.get_size_hint()
            + entries_count * (RpoDigest::SERIALIZED_SIZE + EMPTY_WORD.get_size_hint())
    }
}
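The size hint above is exact rather than a lower bound, and the serialization tests further down rely on that. A minimal sketch of the contract, using only the API shown in this diff:

```rust
// One count prefix plus (digest + word) per entry.
let smt = Smt::default();
let bytes = smt.to_bytes();
assert_eq!(bytes.len(), smt.get_size_hint());
```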
impl Deserializable for Smt {
@@ -339,6 +507,7 @@ fn test_smt_serialization_deserialization() {
    let smt_default = Smt::default();
    let bytes = smt_default.to_bytes();
    assert_eq!(smt_default, Smt::read_from_bytes(&bytes).unwrap());
    assert_eq!(bytes.len(), smt_default.get_size_hint());

    // Smt with values
    let smt_leaves_2: [(RpoDigest, Word); 2] = [
@@ -355,4 +524,5 @@ fn test_smt_serialization_deserialization() {

    let bytes = smt.to_bytes();
    assert_eq!(smt, Smt::read_from_bytes(&bytes).unwrap());
    assert_eq!(bytes.len(), smt.get_size_hint());
}

@@ -1,6 +1,7 @@
use alloc::string::ToString;

use super::{MerklePath, RpoDigest, SmtLeaf, SmtProofError, Word, SMT_DEPTH};
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
use alloc::string::ToString;

/// A proof which can be used to assert membership (or non-membership) of key-value pairs in a
/// [`super::Smt`].
@@ -24,7 +25,7 @@ impl SmtProof {
    pub fn new(path: MerklePath, leaf: SmtLeaf) -> Result<Self, SmtProofError> {
        let depth: usize = SMT_DEPTH.into();
        if path.len() != depth {
            return Err(SmtProofError::InvalidPathLength(path.len()));
            return Err(SmtProofError::InvalidMerklePathLength(path.len()));
        }

        Ok(Self { path, leaf })
@@ -57,7 +58,7 @@ impl SmtProof {

                    // make sure the Merkle path resolves to the correct root
                    self.compute_root() == *root
                }
            },
            // If the key maps to a different leaf, the proof cannot verify membership of `value`
            None => false,
        }

@@ -1,10 +1,11 @@
use alloc::vec::Vec;

use super::{Felt, LeafIndex, NodeIndex, Rpo256, RpoDigest, Smt, SmtLeaf, EMPTY_WORD, SMT_DEPTH};
use crate::{
    merkle::{EmptySubtreeRoots, MerkleStore},
    merkle::{smt::SparseMerkleTree, EmptySubtreeRoots, MerkleStore},
    utils::{Deserializable, Serializable},
    Word, ONE, WORD_SIZE,
};
use alloc::vec::Vec;

// SMT
// --------------------------------------------------------------------------------------------
@@ -257,6 +258,195 @@ fn test_smt_removal() {
    }
}

/// This tests that we can correctly calculate prospective leaves -- that is, we can construct
/// correct [`SmtLeaf`] values for a theoretical insertion on a Merkle tree without mutating or
/// cloning the tree.
#[test]
fn test_prospective_hash() {
    let mut smt = Smt::default();

    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;

    let key_1: RpoDigest = RpoDigest::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: RpoDigest =
        RpoDigest::from([2_u32.into(), 2_u32.into(), 2_u32.into(), Felt::new(raw)]);
    // Sort key_3 before key_1, to test non-append insertion.
    let key_3: RpoDigest =
        RpoDigest::from([0_u32.into(), 0_u32.into(), 0_u32.into(), Felt::new(raw)]);

    let value_1 = [ONE; WORD_SIZE];
    let value_2 = [2_u32.into(); WORD_SIZE];
    let value_3: [Felt; 4] = [3_u32.into(); WORD_SIZE];

    // insert key-value 1
    {
        let prospective =
            smt.construct_prospective_leaf(smt.get_leaf(&key_1), &key_1, &value_1).hash();
        smt.insert(key_1, value_1);

        let leaf = smt.get_leaf(&key_1);
        assert_eq!(
            prospective,
            leaf.hash(),
            "prospective hash for leaf {leaf:?} did not match actual hash",
        );
    }

    // insert key-value 2
    {
        let prospective =
            smt.construct_prospective_leaf(smt.get_leaf(&key_2), &key_2, &value_2).hash();
        smt.insert(key_2, value_2);

        let leaf = smt.get_leaf(&key_2);
        assert_eq!(
            prospective,
            leaf.hash(),
            "prospective hash for leaf {leaf:?} did not match actual hash",
        );
    }

    // insert key-value 3
    {
        let prospective =
            smt.construct_prospective_leaf(smt.get_leaf(&key_3), &key_3, &value_3).hash();
        smt.insert(key_3, value_3);

        let leaf = smt.get_leaf(&key_3);
        assert_eq!(
            prospective,
            leaf.hash(),
            "prospective hash for leaf {leaf:?} did not match actual hash",
        );
    }

    // remove key 3
    {
        let old_leaf = smt.get_leaf(&key_3);
        let old_value_3 = smt.insert(key_3, EMPTY_WORD);
        assert_eq!(old_value_3, value_3);
        let prospective_leaf =
            smt.construct_prospective_leaf(smt.get_leaf(&key_3), &key_3, &old_value_3);

        assert_eq!(
            old_leaf.hash(),
            prospective_leaf.hash(),
            "removing and prospectively re-adding a leaf didn't yield the original leaf:\
            \n original leaf: {old_leaf:?}\
            \n prospective leaf: {prospective_leaf:?}",
        );
    }

    // remove key 2
    {
        let old_leaf = smt.get_leaf(&key_2);
        let old_value_2 = smt.insert(key_2, EMPTY_WORD);
        assert_eq!(old_value_2, value_2);
        let prospective_leaf =
            smt.construct_prospective_leaf(smt.get_leaf(&key_2), &key_2, &old_value_2);

        assert_eq!(
            old_leaf.hash(),
            prospective_leaf.hash(),
            "removing and prospectively re-adding a leaf didn't yield the original leaf:\
            \n original leaf: {old_leaf:?}\
            \n prospective leaf: {prospective_leaf:?}",
        );
    }

    // remove key 1
    {
        let old_leaf = smt.get_leaf(&key_1);
        let old_value_1 = smt.insert(key_1, EMPTY_WORD);
        assert_eq!(old_value_1, value_1);
        let prospective_leaf =
            smt.construct_prospective_leaf(smt.get_leaf(&key_1), &key_1, &old_value_1);
        assert_eq!(
            old_leaf.hash(),
            prospective_leaf.hash(),
            "removing and prospectively re-adding a leaf didn't yield the original leaf:\
            \n original leaf: {old_leaf:?}\
            \n prospective leaf: {prospective_leaf:?}",
        );
    }
}

/// This tests that we can perform prospective changes correctly.
#[test]
fn test_prospective_insertion() {
    let mut smt = Smt::default();

    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;

    let key_1: RpoDigest = RpoDigest::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: RpoDigest =
        RpoDigest::from([2_u32.into(), 2_u32.into(), 2_u32.into(), Felt::new(raw)]);
    // Sort key_3 before key_1, to test non-append insertion.
    let key_3: RpoDigest =
        RpoDigest::from([0_u32.into(), 0_u32.into(), 0_u32.into(), Felt::new(raw)]);

    let value_1 = [ONE; WORD_SIZE];
    let value_2 = [2_u32.into(); WORD_SIZE];
    let value_3: [Felt; 4] = [3_u32.into(); WORD_SIZE];

    let root_empty = smt.root();

    let root_1 = {
        smt.insert(key_1, value_1);
        smt.root()
    };

    let root_2 = {
        smt.insert(key_2, value_2);
        smt.root()
    };

    let root_3 = {
        smt.insert(key_3, value_3);
        smt.root()
    };

    // Test incremental updates.

    let mut smt = Smt::default();

    let mutations = smt.compute_mutations(vec![(key_1, value_1)]);
    assert_eq!(mutations.root(), root_1, "prospective root 1 did not match actual root 1");
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_1, "mutations before and after apply did not match");

    let mutations = smt.compute_mutations(vec![(key_2, value_2)]);
    assert_eq!(mutations.root(), root_2, "prospective root 2 did not match actual root 2");
    let mutations =
        smt.compute_mutations(vec![(key_3, EMPTY_WORD), (key_2, value_2), (key_3, value_3)]);
    assert_eq!(mutations.root(), root_3, "mutations before and after apply did not match");
    smt.apply_mutations(mutations).unwrap();

    // Edge case: multiple values at the same key, where a later pair restores the original value.
    let mutations = smt.compute_mutations(vec![(key_3, EMPTY_WORD), (key_3, value_3)]);
    assert_eq!(mutations.root(), root_3);
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_3);

    // Test batch updates, and that the order doesn't matter.
    let pairs =
        vec![(key_3, value_2), (key_2, EMPTY_WORD), (key_1, EMPTY_WORD), (key_3, EMPTY_WORD)];
    let mutations = smt.compute_mutations(pairs);
    assert_eq!(
        mutations.root(),
        root_empty,
        "prospective root for batch removal did not match actual root",
    );
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_empty, "mutations before and after apply did not match");

    let pairs = vec![(key_3, value_3), (key_1, value_1), (key_2, value_2)];
    let mutations = smt.compute_mutations(pairs);
    assert_eq!(mutations.root(), root_3);
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_3);
}

/// Tests that 2 key-value pairs stored in the same leaf have the same path
#[test]
fn test_smt_path_to_keys_in_same_leaf_are_equal() {
@@ -326,6 +516,16 @@ fn test_smt_entries() {
    assert!(entries.next().is_none());
}

/// Tests that the `EMPTY_ROOT` constant generated in the `Smt` equals the root of the empty tree
/// of depth 64
#[test]
fn test_smt_check_empty_root_constant() {
    // get the root of the empty tree of depth 64
    let empty_root_64_depth = EmptySubtreeRoots::empty_hashes(64)[0];

    assert_eq!(empty_root_64_depth, Smt::EMPTY_ROOT);
}

// SMT LEAF
// --------------------------------------------------------------------------------------------


@@ -1,9 +1,13 @@
use alloc::{collections::BTreeMap, vec::Vec};
use core::mem;

use num::Integer;

use super::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, NodeIndex};
use crate::{
    hash::rpo::{Rpo256, RpoDigest},
    Felt, Word, EMPTY_WORD,
};
use alloc::vec::Vec;

mod full;
pub use full::{Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError, SMT_DEPTH};
@@ -44,20 +48,34 @@ pub const SMT_MAX_DEPTH: u8 = 64;
/// [SparseMerkleTree] currently doesn't support optimizations that compress Merkle proofs.
pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
    /// The type for a key
    type Key: Clone;
    type Key: Clone + Ord;
    /// The type for a value
    type Value: Clone + PartialEq;
    /// The type for a leaf
    type Leaf;
    type Leaf: Clone;
    /// The type for an opening (i.e. a "proof") of a leaf
    type Opening;

    /// The default value used to compute the hash of empty leaves
    const EMPTY_VALUE: Self::Value;

    /// The root of the empty tree with provided DEPTH
    const EMPTY_ROOT: RpoDigest;

    // PROVIDED METHODS
    // ---------------------------------------------------------------------------------------------

    /// Creates a new sparse Merkle tree from an existing set of key-value pairs, in parallel.
    #[cfg(feature = "concurrent")]
    fn with_entries_par(entries: Vec<(Self::Key, Self::Value)>) -> Result<Self, MerkleError>
    where
        Self: Sized,
    {
        let (inner_nodes, leaves) = Self::build_subtrees(entries);
        let root = inner_nodes.get(&NodeIndex::root()).unwrap().hash();
        Self::from_raw_parts(inner_nodes, leaves, root)
    }

    /// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
    /// path to the leaf, as well as the leaf itself.
    fn open(&self, key: &Self::Key) -> Self::Opening {
@@ -139,9 +157,165 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
        self.set_root(node_hash);
    }

    /// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
    /// tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`SparseMerkleTree::apply_mutations()`] can be called in order to commit these changes to
    /// the Merkle tree, or [`drop()`] to discard them.
    fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (Self::Key, Self::Value)>,
    ) -> MutationSet<DEPTH, Self::Key, Self::Value> {
        use NodeMutation::*;

        let mut new_root = self.root();
        let mut new_pairs: BTreeMap<Self::Key, Self::Value> = Default::default();
        let mut node_mutations: BTreeMap<NodeIndex, NodeMutation> = Default::default();

        for (key, value) in kv_pairs {
            // If the old value and the new value are the same, there is nothing to update.
            // For the unusual case that kv_pairs has multiple values at the same key, we'll have
            // to check the key-value pairs we've already seen to get the "effective" old value.
            let old_value = new_pairs.get(&key).cloned().unwrap_or_else(|| self.get_value(&key));
            if value == old_value {
                continue;
            }

            let leaf_index = Self::key_to_leaf_index(&key);
            let mut node_index = NodeIndex::from(leaf_index);

            // We need the current leaf's hash to calculate the new leaf, but in the rare case that
            // `kv_pairs` has multiple pairs that go into the same leaf, then those pairs are also
            // part of the "current leaf".
            let old_leaf = {
                let pairs_at_index = new_pairs
                    .iter()
                    .filter(|&(new_key, _)| Self::key_to_leaf_index(new_key) == leaf_index);

                pairs_at_index.fold(self.get_leaf(&key), |acc, (k, v)| {
                    // Most of the time `pairs_at_index` should only contain a single entry (or
                    // none at all), as multi-leaves should be really rare.
                    let existing_leaf = acc.clone();
                    self.construct_prospective_leaf(existing_leaf, k, v)
                })
            };

            let new_leaf = self.construct_prospective_leaf(old_leaf, &key, &value);

            let mut new_child_hash = Self::hash_leaf(&new_leaf);

            for node_depth in (0..node_index.depth()).rev() {
                // Whether the node we're replacing is the right child or the left child.
                let is_right = node_index.is_value_odd();
                node_index.move_up();

                let old_node = node_mutations
                    .get(&node_index)
                    .map(|mutation| match mutation {
                        Addition(node) => node.clone(),
                        Removal => EmptySubtreeRoots::get_inner_node(DEPTH, node_depth),
                    })
                    .unwrap_or_else(|| self.get_inner_node(node_index));

                let new_node = if is_right {
                    InnerNode {
                        left: old_node.left,
                        right: new_child_hash,
                    }
                } else {
                    InnerNode {
                        left: new_child_hash,
                        right: old_node.right,
                    }
                };

                // The next iteration will operate on this new node's hash.
                new_child_hash = new_node.hash();

                let &equivalent_empty_hash = EmptySubtreeRoots::entry(DEPTH, node_depth);
                let is_removal = new_child_hash == equivalent_empty_hash;
                let new_entry = if is_removal { Removal } else { Addition(new_node) };
                node_mutations.insert(node_index, new_entry);
            }

            // Once we're at depth 0, the last node we made is the new root.
            new_root = new_child_hash;
            // And then we're done with this pair; on to the next one.
            new_pairs.insert(key, value);
        }

        MutationSet {
            old_root: self.root(),
            new_root,
            node_mutations,
            new_pairs,
        }
    }

    /// Apply the prospective mutations computed with [`SparseMerkleTree::compute_mutations()`] to
    /// this tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
    /// the `mutations` were computed against, and the second item is the actual current root of
    /// this tree.
    fn apply_mutations(
        &mut self,
        mutations: MutationSet<DEPTH, Self::Key, Self::Value>,
    ) -> Result<(), MerkleError>
    where
        Self: Sized,
    {
        use NodeMutation::*;
        let MutationSet {
            old_root,
            node_mutations,
            new_pairs,
            new_root,
        } = mutations;

        // Guard against accidentally trying to apply mutations that were computed against a
        // different tree, including a stale version of this tree.
        if old_root != self.root() {
            return Err(MerkleError::ConflictingRoots {
                expected_root: self.root(),
                actual_root: old_root,
            });
        }

        for (index, mutation) in node_mutations {
            match mutation {
                Removal => self.remove_inner_node(index),
                Addition(node) => self.insert_inner_node(index, node),
            }
        }

        for (key, value) in new_pairs {
            self.insert_value(key, value);
        }

        self.set_root(new_root);

        Ok(())
    }
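Together, `compute_mutations()` and `apply_mutations()` form a check-then-commit workflow. A minimal sketch of that flow, assuming only the `Smt` API exercised by the tests above (the key and value here are arbitrary):

```rust
let mut smt = Smt::default();
let key = RpoDigest::from([ONE, ONE, ONE, Felt::new(7)]);
let value = [ONE; WORD_SIZE];

// Compute the changes without touching the tree, validate the prospective
// root, then commit; apply_mutations() fails if the tree changed meanwhile.
let mutations = smt.compute_mutations(vec![(key, value)]);
let prospective_root = mutations.root();
smt.apply_mutations(mutations).unwrap();
assert_eq!(smt.root(), prospective_root);
```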
    // REQUIRED METHODS
    // ---------------------------------------------------------------------------------------------

    /// Construct this type from already computed leaves and nodes. The caller ensures passed
    /// arguments are correct and consistent with each other.
    fn from_raw_parts(
        inner_nodes: BTreeMap<NodeIndex, InnerNode>,
        leaves: BTreeMap<u64, Self::Leaf>,
        root: RpoDigest,
    ) -> Result<Self, MerkleError>
    where
        Self: Sized;

    /// The root of the tree
    fn root(&self) -> RpoDigest;

@@ -160,27 +334,168 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
    /// Inserts a leaf node, and returns the value at the key if one already exists
    fn insert_value(&mut self, key: Self::Key, value: Self::Value) -> Option<Self::Value>;

    /// Returns the value at the specified key. Recall that by definition, any key that hasn't been
    /// updated is associated with [`Self::EMPTY_VALUE`].
    fn get_value(&self, key: &Self::Key) -> Self::Value;

    /// Returns the leaf at the specified index.
    fn get_leaf(&self, key: &Self::Key) -> Self::Leaf;

    /// Returns the hash of a leaf
    fn hash_leaf(leaf: &Self::Leaf) -> RpoDigest;

    /// Returns what a leaf would look like if a key-value pair were inserted into the tree, without
    /// mutating the tree itself. The existing leaf can be empty.
    ///
    /// To get a prospective leaf based on the current state of the tree, use `self.get_leaf(key)`
    /// as the argument for `existing_leaf`. The return value from this function can be chained back
    /// into this function as the first argument to continue making prospective changes.
    ///
    /// # Invariants
    /// Because this method is for a prospective key-value insertion into a specific leaf,
    /// `existing_leaf` must have the same leaf index as `key` (as determined by
    /// [`SparseMerkleTree::key_to_leaf_index()`]), or the result will be meaningless.
    fn construct_prospective_leaf(
        &self,
        existing_leaf: Self::Leaf,
        key: &Self::Key,
        value: &Self::Value,
    ) -> Self::Leaf;
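The chaining described in the doc comment above can be sketched as follows (hypothetical `key_a`/`key_b` that map to the same leaf index; values arbitrary):

```rust
// Stack two prospective changes on one leaf without mutating the tree.
let leaf = smt.get_leaf(&key_a);
let leaf = smt.construct_prospective_leaf(leaf, &key_a, &value_a);
let leaf = smt.construct_prospective_leaf(leaf, &key_b, &value_b);
let prospective_hash = Smt::hash_leaf(&leaf);
```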
    /// Maps a key to a leaf index
    fn key_to_leaf_index(key: &Self::Key) -> LeafIndex<DEPTH>;

    /// Constructs a single leaf from an arbitrary number of key-value pairs.
    /// Those pairs must all have the same leaf index.
    fn pairs_to_leaf(pairs: Vec<(Self::Key, Self::Value)>) -> Self::Leaf;

    /// Maps a (MerklePath, Self::Leaf) to an opening.
    ///
    /// The length of `path` is guaranteed to be equal to `DEPTH`
    fn path_and_leaf_to_opening(path: MerklePath, leaf: Self::Leaf) -> Self::Opening;

    /// Performs the initial transforms for constructing a [`SparseMerkleTree`] by composing
    /// subtrees. In other words, this function takes the key-value inputs to the tree, and produces
    /// the inputs to feed into [`build_subtree()`].
    ///
    /// `pairs` *must* already be sorted **by leaf index column**, not simply sorted by key. If
    /// `pairs` is not correctly sorted, the returned computations will be incorrect.
    ///
    /// # Panics
    /// With debug assertions on, this function panics if it detects that `pairs` is not correctly
    /// sorted. Without debug assertions, the returned computations will be incorrect.
    fn sorted_pairs_to_leaves(
        pairs: Vec<(Self::Key, Self::Value)>,
    ) -> PairComputations<u64, Self::Leaf> {
        debug_assert!(pairs.is_sorted_by_key(|(key, _)| Self::key_to_leaf_index(key).value()));

        let mut accumulator: PairComputations<u64, Self::Leaf> = Default::default();
        let mut accumulated_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(pairs.len() / 2);

        // As we iterate, we'll keep track of the kv-pairs we've seen so far that correspond to a
        // single leaf. When we see a pair that's in a different leaf, we'll swap these pairs
        // out and store them in our accumulated leaves.
        let mut current_leaf_buffer: Vec<(Self::Key, Self::Value)> = Default::default();

        let mut iter = pairs.into_iter().peekable();
        while let Some((key, value)) = iter.next() {
            let col = Self::key_to_leaf_index(&key).index.value();
            let peeked_col = iter.peek().map(|(key, _v)| {
                let index = Self::key_to_leaf_index(key);
                let next_col = index.index.value();
                // We panic if `pairs` is not sorted by column.
                debug_assert!(next_col >= col);
                next_col
            });
            current_leaf_buffer.push((key, value));

            // If the next pair is the same column as this one, then we're done after adding this
            // pair to the buffer.
            if peeked_col == Some(col) {
                continue;
            }

            // Otherwise, the next pair is a different column, or there is no next pair. Either way
            // it's time to swap out our buffer.
            let leaf_pairs = mem::take(&mut current_leaf_buffer);
            let leaf = Self::pairs_to_leaf(leaf_pairs);
            let hash = Self::hash_leaf(&leaf);

            accumulator.nodes.insert(col, leaf);
            accumulated_leaves.push(SubtreeLeaf { col, hash });

            debug_assert!(current_leaf_buffer.is_empty());
        }

        // TODO: determine if there is any notable performance difference between computing
        // subtree boundaries after the fact as an iterator adapter (like this), versus computing
        // subtree boundaries as we go. Either way this function is only used at the beginning of a
        // parallel construction, so it should not be a critical path.
        accumulator.leaves = SubtreeLeavesIter::from_leaves(&mut accumulated_leaves).collect();
        accumulator
    }

    /// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
    ///
    /// `entries` need not be sorted. This function will sort them.
    #[cfg(feature = "concurrent")]
    fn build_subtrees(
        mut entries: Vec<(Self::Key, Self::Value)>,
    ) -> (BTreeMap<NodeIndex, InnerNode>, BTreeMap<u64, Self::Leaf>) {
        entries.sort_by_key(|item| {
            let index = Self::key_to_leaf_index(&item.0);
            index.value()
        });
        Self::build_subtrees_from_sorted_entries(entries)
    }

    /// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
    ///
    /// This function is mostly an implementation detail of
    /// [`SparseMerkleTree::with_entries_par()`].
    #[cfg(feature = "concurrent")]
    fn build_subtrees_from_sorted_entries(
        entries: Vec<(Self::Key, Self::Value)>,
    ) -> (BTreeMap<NodeIndex, InnerNode>, BTreeMap<u64, Self::Leaf>) {
        use rayon::prelude::*;

        let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();

        let PairComputations {
            leaves: mut leaf_subtrees,
            nodes: initial_leaves,
        } = Self::sorted_pairs_to_leaves(entries);

        for current_depth in (SUBTREE_DEPTH..=DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
            let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
                .into_par_iter()
                .map(|subtree| {
                    debug_assert!(subtree.is_sorted());
                    debug_assert!(!subtree.is_empty());

                    let (nodes, subtree_root) = build_subtree(subtree, DEPTH, current_depth);
                    (nodes, subtree_root)
                })
                .unzip();

            leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
            accumulated_nodes.extend(nodes.into_iter().flatten());

            debug_assert!(!leaf_subtrees.is_empty());
        }
        (accumulated_nodes, initial_leaves)
    }
}

// INNER NODE
// ================================================================================================

/// This struct is public so functions returning it can be used in `benches/`, but is otherwise not
/// part of the public API.
#[doc(hidden)]
#[derive(Debug, Default, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct InnerNode {
pub struct InnerNode {
    pub left: RpoDigest,
    pub right: RpoDigest,
}
@@ -234,7 +549,7 @@ impl<const DEPTH: u8> TryFrom<NodeIndex> for LeafIndex<DEPTH> {

    fn try_from(node_index: NodeIndex) -> Result<Self, Self::Error> {
        if node_index.depth() != DEPTH {
            return Err(MerkleError::InvalidDepth {
            return Err(MerkleError::InvalidNodeIndexDepth {
                expected: DEPTH,
                provided: node_index.depth(),
            });
@@ -243,3 +558,244 @@ impl<const DEPTH: u8> TryFrom<NodeIndex> for LeafIndex<DEPTH> {
        Self::new(node_index.value())
    }
}

// MUTATIONS
// ================================================================================================

/// A change to an inner node of a [`SparseMerkleTree`] that hasn't yet been applied.
/// [`MutationSet`] stores this type in relation to a [`NodeIndex`] to keep track of what changes
/// need to occur at which node indices.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum NodeMutation {
    /// Corresponds to [`SparseMerkleTree::remove_inner_node()`].
    Removal,
    /// Corresponds to [`SparseMerkleTree::insert_inner_node()`].
    Addition(InnerNode),
}

/// Represents a group of prospective mutations to a `SparseMerkleTree`, created by
/// `SparseMerkleTree::compute_mutations()`, and that can be applied with
/// `SparseMerkleTree::apply_mutations()`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct MutationSet<const DEPTH: u8, K, V> {
    /// The root of the Merkle tree this MutationSet is for, recorded at the time
    /// [`SparseMerkleTree::compute_mutations()`] was called. Exists to guard against applying
    /// mutations to the wrong tree or applying stale mutations to a tree that has since changed.
    old_root: RpoDigest,
    /// The set of nodes that need to be removed or added. The "effective" node at an index is the
    /// Merkle tree's existing node at that index, with the [`NodeMutation`] in this map at that
    /// index overlayed, if any. Each [`NodeMutation::Addition`] corresponds to a
    /// [`SparseMerkleTree::insert_inner_node()`] call, and each [`NodeMutation::Removal`]
    /// corresponds to a [`SparseMerkleTree::remove_inner_node()`] call.
    node_mutations: BTreeMap<NodeIndex, NodeMutation>,
    /// The set of top-level key-value pairs we're prospectively adding to the tree, including
    /// adding empty values. The "effective" value for a key is the value in this BTreeMap, falling
    /// back to the existing value in the Merkle tree. Each entry corresponds to a
    /// [`SparseMerkleTree::insert_value()`] call.
    new_pairs: BTreeMap<K, V>,
    /// The calculated root for the Merkle tree, given these mutations. Publicly retrievable with
    /// [`MutationSet::root()`]. Corresponds to a [`SparseMerkleTree::set_root()`] call.
    new_root: RpoDigest,
}

impl<const DEPTH: u8, K, V> MutationSet<DEPTH, K, V> {
    /// Queries the root that was calculated during `SparseMerkleTree::compute_mutations()`. See
    /// that method for more information.
    pub fn root(&self) -> RpoDigest {
        self.new_root
    }
}

// SUBTREES
// ================================================================================================
/// A subtree is of depth 8.
const SUBTREE_DEPTH: u8 = 8;

/// A depth-8 subtree contains 256 "columns" that can possibly be occupied.
const COLS_PER_SUBTREE: u64 = u64::pow(2, SUBTREE_DEPTH as u32);
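As a quick sanity check on these constants: a column's absolute subtree index is simply its value divided by `COLS_PER_SUBTREE` (the concrete columns below are arbitrary):

```rust
assert_eq!(255 / COLS_PER_SUBTREE, 0); // columns 0..=255 fall in subtree 0
assert_eq!(400 / COLS_PER_SUBTREE, 1);
assert_eq!(1024 / COLS_PER_SUBTREE, 4);
```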
/// Helper struct for organizing the data we care about when computing Merkle subtrees.
///
/// Note that these represent "conceptual" leaves of some subtree, not necessarily
/// the leaf type for the sparse Merkle tree.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct SubtreeLeaf {
    /// The 'value' field of [`NodeIndex`]. When computing a subtree, the depth is already known.
    pub col: u64,
    /// The hash of the node this `SubtreeLeaf` represents.
    pub hash: RpoDigest,
}

/// Helper struct to organize the return value of [`SparseMerkleTree::sorted_pairs_to_leaves()`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct PairComputations<K, L> {
    /// Literal leaves to be added to the sparse Merkle tree's internal mapping.
    pub nodes: BTreeMap<K, L>,
    /// "Conceptual" leaves that will be used for computations.
    pub leaves: Vec<Vec<SubtreeLeaf>>,
}

// Derive requires `L` to impl Default, even though we don't actually need that.
impl<K, L> Default for PairComputations<K, L> {
    fn default() -> Self {
        Self {
            nodes: Default::default(),
            leaves: Default::default(),
        }
    }
}

#[derive(Debug)]
struct SubtreeLeavesIter<'s> {
    leaves: core::iter::Peekable<alloc::vec::Drain<'s, SubtreeLeaf>>,
}
impl<'s> SubtreeLeavesIter<'s> {
    fn from_leaves(leaves: &'s mut Vec<SubtreeLeaf>) -> Self {
        // TODO: determine if there is any notable performance difference between taking a Vec,
        // which may need flattening first, vs storing a `Box<dyn Iterator<Item = SubtreeLeaf>>`.
        // The latter may have self-referential properties that are impossible to express in purely
        // safe Rust.
        Self { leaves: leaves.drain(..).peekable() }
    }
}
impl core::iter::Iterator for SubtreeLeavesIter<'_> {
    type Item = Vec<SubtreeLeaf>;

    /// Each `next()` collects an entire subtree.
    fn next(&mut self) -> Option<Vec<SubtreeLeaf>> {
        let mut subtree: Vec<SubtreeLeaf> = Default::default();

        let mut last_subtree_col = 0;

        while let Some(leaf) = self.leaves.peek() {
            last_subtree_col = u64::max(1, last_subtree_col);
            let is_exact_multiple = Integer::is_multiple_of(&last_subtree_col, &COLS_PER_SUBTREE);
            let next_subtree_col = if is_exact_multiple {
                u64::next_multiple_of(last_subtree_col + 1, COLS_PER_SUBTREE)
            } else {
                last_subtree_col.next_multiple_of(COLS_PER_SUBTREE)
            };

            last_subtree_col = leaf.col;
            if leaf.col < next_subtree_col {
                subtree.push(self.leaves.next().unwrap());
            } else if subtree.is_empty() {
                continue;
            } else {
                break;
            }
        }

        if subtree.is_empty() {
            debug_assert!(self.leaves.peek().is_none());
            return None;
        }

        Some(subtree)
    }
}
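A small sketch of how this iterator chunks a flat, sorted leaf list into per-subtree groups (columns chosen arbitrarily, hashes defaulted):

```rust
let mut flat = vec![
    SubtreeLeaf { col: 16, hash: RpoDigest::default() },
    SubtreeLeaf { col: 17, hash: RpoDigest::default() },
    SubtreeLeaf { col: 400, hash: RpoDigest::default() },
];
let grouped: Vec<Vec<SubtreeLeaf>> = SubtreeLeavesIter::from_leaves(&mut flat).collect();
// Columns 16 and 17 share depth-8 subtree 0; column 400 starts a new group.
assert_eq!(grouped.len(), 2);
assert_eq!(grouped[0].len(), 2);
```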
// HELPER FUNCTIONS
// ================================================================================================

/// Builds Merkle nodes from a bottom layer of "leaves" -- represented by a horizontal index and
/// the hash of the leaf at that index. `leaves` *must* be sorted by horizontal index, and
/// `leaves` must not contain more than one depth-8 subtree's worth of leaves.
///
/// This function will then calculate the inner nodes above each leaf for 8 layers, as well as
/// the "leaves" for the next 8-deep subtree, so this function can effectively be chained into
/// itself.
///
/// # Panics
/// With debug assertions on, this function panics under invalid inputs: if `leaves` contains
/// more entries than can fit in a depth-8 subtree, if `leaves` contains leaves belonging to
/// different depth-8 subtrees, if `bottom_depth` is lower in the tree than the specified
/// maximum depth (`DEPTH`), or if `leaves` is not sorted.
fn build_subtree(
    mut leaves: Vec<SubtreeLeaf>,
    tree_depth: u8,
    bottom_depth: u8,
) -> (BTreeMap<NodeIndex, InnerNode>, SubtreeLeaf) {
    debug_assert!(bottom_depth <= tree_depth);
    debug_assert!(Integer::is_multiple_of(&bottom_depth, &SUBTREE_DEPTH));
    debug_assert!(leaves.len() <= usize::pow(2, SUBTREE_DEPTH as u32));
    let subtree_root = bottom_depth - SUBTREE_DEPTH;
    let mut inner_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
    let mut next_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(leaves.len() / 2);
    for next_depth in (subtree_root..bottom_depth).rev() {
        debug_assert!(next_depth <= bottom_depth);
        // `next_depth` is the stuff we're making.
        // `current_depth` is the stuff we have.
        let current_depth = next_depth + 1;
        let mut iter = leaves.drain(..).peekable();
        while let Some(first) = iter.next() {
            // On non-continuous iterations, including the first iteration, `first_column` may
            // be a left or right node. On subsequent continuous iterations, we will always call
            // `iter.next()` twice.
            // On non-continuous iterations (including the very first iteration), this column
            // could be either on the left or the right. If the next iteration is not
            // discontinuous with our right node, then the next iteration's
            let is_right = first.col.is_odd();
            let (left, right) = if is_right {
                // Discontinuous iteration: we have no left node, so it must be empty.
                let left = SubtreeLeaf {
                    col: first.col - 1,
                    hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
                };
                let right = first;
                (left, right)
            } else {
                let left = first;
                let right_col = first.col + 1;
                let right = match iter.peek().copied() {
                    Some(SubtreeLeaf { col, .. }) if col == right_col => {
                        // Our inputs must be sorted.
                        debug_assert!(left.col <= col);
                        // The next leaf in the iterator is our sibling. Use it and consume it!
                        iter.next().unwrap()
                    },
                    // Otherwise, the leaves don't contain our sibling, so our sibling must be
                    // empty.
                    _ => SubtreeLeaf {
                        col: right_col,
                        hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
                    },
                };
                (left, right)
            };
            let index = NodeIndex::new_unchecked(current_depth, left.col).parent();
            let node = InnerNode { left: left.hash, right: right.hash };
            let hash = node.hash();
            let &equivalent_empty_hash = EmptySubtreeRoots::entry(tree_depth, next_depth);
            // If this hash is empty, then it doesn't become a new inner node, nor does it count
            // as a leaf for the next depth.
            if hash != equivalent_empty_hash {
                inner_nodes.insert(index, node);
                next_leaves.push(SubtreeLeaf { col: index.value(), hash });
            }
        }
        // Stop borrowing `leaves`, so we can swap it.
        // The iterator is empty at this point anyway.
        drop(iter);
        // After each depth, consider the stuff we just made the new "leaves", and empty the
        // other collection.
        mem::swap(&mut leaves, &mut next_leaves);
    }
    debug_assert_eq!(leaves.len(), 1);
    let root = leaves.pop().unwrap();
    (inner_nodes, root)
}
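Because each call also returns the "leaf" for the next 8-deep level, `build_subtree()` composes with itself. A rough sketch of one chaining step (`leaves` here is a hypothetical, sorted, single-subtree batch):

```rust
// Build the bottom 8 levels, then feed the resulting root in as a "leaf"
// of the subtree ending 8 levels higher.
let (bottom_nodes, bottom_root) = build_subtree(leaves, SMT_DEPTH, SMT_DEPTH);
let (upper_nodes, _next_root) =
    build_subtree(vec![bottom_root], SMT_DEPTH, SMT_DEPTH - SUBTREE_DEPTH);
```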
#[cfg(feature = "internal")]
pub fn build_subtree_for_bench(
    leaves: Vec<SubtreeLeaf>,
    tree_depth: u8,
    bottom_depth: u8,
) -> (BTreeMap<NodeIndex, InnerNode>, SubtreeLeaf) {
    build_subtree(leaves, tree_depth, bottom_depth)
}

// TESTS
// ================================================================================================
#[cfg(test)]
mod tests;

@@ -1,9 +1,13 @@
use alloc::{
    collections::{BTreeMap, BTreeSet},
    vec::Vec,
};

use super::{
    super::ValuePath, EmptySubtreeRoots, InnerNode, InnerNodeInfo, LeafIndex, MerkleError,
    MerklePath, NodeIndex, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD, SMT_MAX_DEPTH,
    SMT_MIN_DEPTH,
    MerklePath, MutationSet, NodeIndex, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
    SMT_MAX_DEPTH, SMT_MIN_DEPTH,
};
use alloc::collections::{BTreeMap, BTreeSet};

#[cfg(test)]
mod tests;
@@ -80,7 +84,7 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {

        for (idx, (key, value)) in entries.into_iter().enumerate() {
            if idx >= max_num_entries {
                return Err(MerkleError::InvalidNumEntries(max_num_entries));
                return Err(MerkleError::TooManyEntries(max_num_entries));
            }

            let old_value = tree.insert(LeafIndex::<DEPTH>::new(key)?, value);
@@ -96,6 +100,23 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        Ok(tree)
    }

    /// Returns a new [`SimpleSmt`] instantiated from already computed leaves and nodes.
    ///
    /// This function performs minimal consistency checking. It is the caller's responsibility to
    /// ensure the passed arguments are correct and consistent with each other.
    ///
    /// # Panics
    /// With debug assertions on, this function panics if `root` does not match the root node in
    /// `inner_nodes`.
    pub fn from_raw_parts(
        inner_nodes: BTreeMap<NodeIndex, InnerNode>,
        leaves: BTreeMap<u64, Word>,
        root: RpoDigest,
    ) -> Self {
        // Our particular implementation of `from_raw_parts()` never returns `Err`.
        <Self as SparseMerkleTree<DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
    }

    /// Wrapper around [`SimpleSmt::with_leaves`] which inserts leaves at contiguous indices
    /// starting at index 0.
    pub fn with_contiguous_leaves(
@@ -157,6 +178,12 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        <Self as SparseMerkleTree<DEPTH>>::open(self, key)
    }

    /// Returns a boolean value indicating whether the SMT is empty.
    pub fn is_empty(&self) -> bool {
        debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
        self.root == Self::EMPTY_ROOT
    }

    // ITERATORS
    // --------------------------------------------------------------------------------------------

@@ -187,6 +214,48 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        <Self as SparseMerkleTree<DEPTH>>::insert(self, key, value)
    }

    /// Computes what changes are necessary to insert the specified key-value pairs into this
    /// Merkle tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`SimpleSmt::apply_mutations()`] can be called in order to commit these changes to the
    /// Merkle tree, or [`drop()`] to discard them.
    ///
    /// # Example
    /// ```
    /// # use miden_crypto::{hash::rpo::RpoDigest, Felt, Word};
    /// # use miden_crypto::merkle::{LeafIndex, SimpleSmt, EmptySubtreeRoots, SMT_DEPTH};
    /// let mut smt: SimpleSmt<3> = SimpleSmt::new().unwrap();
    /// let pair = (LeafIndex::default(), Word::default());
    /// let mutations = smt.compute_mutations(vec![pair]);
    /// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(3, 0));
    /// smt.apply_mutations(mutations);
    /// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(3, 0));
    /// ```
    pub fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (LeafIndex<DEPTH>, Word)>,
    ) -> MutationSet<DEPTH, LeafIndex<DEPTH>, Word> {
        <Self as SparseMerkleTree<DEPTH>>::compute_mutations(self, kv_pairs)
    }

    /// Apply the prospective mutations computed with [`SimpleSmt::compute_mutations()`] to this
    /// tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`alloc::vec::Vec`]. The first item is the
    /// root hash the `mutations` were computed against, and the second item is the actual
    /// current root of this tree.
    pub fn apply_mutations(
        &mut self,
        mutations: MutationSet<DEPTH, LeafIndex<DEPTH>, Word>,
    ) -> Result<(), MerkleError> {
        <Self as SparseMerkleTree<DEPTH>>::apply_mutations(self, mutations)
    }

    /// Inserts a subtree at the specified index. The depth at which the subtree is inserted is
    /// computed as `DEPTH - SUBTREE_DEPTH`.
    ///
@@ -197,7 +266,7 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        subtree: SimpleSmt<SUBTREE_DEPTH>,
    ) -> Result<RpoDigest, MerkleError> {
        if SUBTREE_DEPTH > DEPTH {
            return Err(MerkleError::InvalidSubtreeDepth {
            return Err(MerkleError::SubtreeDepthExceedsDepth {
                subtree_depth: SUBTREE_DEPTH,
                tree_depth: DEPTH,
            });
@@ -255,6 +324,20 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
    type Opening = ValuePath;

    const EMPTY_VALUE: Self::Value = EMPTY_WORD;
    const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(DEPTH, 0);

    fn from_raw_parts(
        inner_nodes: BTreeMap<NodeIndex, InnerNode>,
        leaves: BTreeMap<u64, Word>,
        root: RpoDigest,
    ) -> Result<Self, MerkleError> {
        if cfg!(debug_assertions) {
            let root_node = inner_nodes.get(&NodeIndex::root()).unwrap();
            assert_eq!(root_node.hash(), root);
        }

        Ok(Self { root, inner_nodes, leaves })
    }

    fn root(&self) -> RpoDigest {
        self.root
@@ -265,11 +348,10 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
    }

    fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
        self.inner_nodes.get(&index).cloned().unwrap_or_else(|| {
            let node = EmptySubtreeRoots::entry(DEPTH, index.depth() + 1);

            InnerNode { left: *node, right: *node }
        })
        self.inner_nodes
            .get(&index)
            .cloned()
            .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(DEPTH, index.depth()))
    }

    fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) {
@@ -288,6 +370,10 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
        }
    }

    fn get_value(&self, key: &LeafIndex<DEPTH>) -> Word {
        self.get_leaf(key)
    }

    fn get_leaf(&self, key: &LeafIndex<DEPTH>) -> Word {
        let leaf_pos = key.value();
        match self.leaves.get(&leaf_pos) {
@@ -301,6 +387,15 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
        leaf.into()
    }

    fn construct_prospective_leaf(
        &self,
        _existing_leaf: Word,
        _key: &LeafIndex<DEPTH>,
        value: &Word,
    ) -> Word {
        *value
    }

    fn key_to_leaf_index(key: &LeafIndex<DEPTH>) -> LeafIndex<DEPTH> {
        *key
    }
@@ -308,4 +403,11 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
    fn path_and_leaf_to_opening(path: MerklePath, leaf: Word) -> ValuePath {
        (path, leaf).into()
    }

    fn pairs_to_leaf(mut pairs: Vec<(LeafIndex<DEPTH>, Word)>) -> Word {
        // SimpleSmt can't have more than one value per key.
        assert_eq!(pairs.len(), 1);
        let (_key, value) = pairs.pop().unwrap();
        value
    }
}

@@ -1,3 +1,7 @@
use alloc::vec::Vec;

use assert_matches::assert_matches;

use super::{
    super::{MerkleError, RpoDigest, SimpleSmt},
    NodeIndex,
@@ -10,7 +14,6 @@ use crate::{
    },
    Word, EMPTY_WORD,
};
use alloc::vec::Vec;

// TEST DATA
// ================================================================================================
@@ -256,12 +259,12 @@ fn test_simplesmt_fail_on_duplicates() {
        // consecutive
        let entries = [(1, *first), (1, *second)];
        let smt = SimpleSmt::<64>::with_leaves(entries);
        assert_eq!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
        assert_matches!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));

        // not consecutive
        let entries = [(1, *first), (5, int_to_leaf(5)), (1, *second)];
        let smt = SimpleSmt::<64>::with_leaves(entries);
        assert_eq!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
        assert_matches!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
    }
}

@@ -443,6 +446,23 @@ fn test_simplesmt_set_subtree_entire_tree() {
    assert_eq!(tree.root(), *EmptySubtreeRoots::entry(DEPTH, 0));
}

/// Tests that the `EMPTY_ROOT` constant generated in the `SimpleSmt` equals the root of the empty
/// tree of depth 64
#[test]
fn test_simplesmt_check_empty_root_constant() {
    // get the root of the empty tree of depth 64
    let empty_root_64_depth = EmptySubtreeRoots::empty_hashes(64)[0];
    assert_eq!(empty_root_64_depth, SimpleSmt::<64>::EMPTY_ROOT);

    // get the root of the empty tree of depth 32
    let empty_root_32_depth = EmptySubtreeRoots::empty_hashes(32)[0];
    assert_eq!(empty_root_32_depth, SimpleSmt::<32>::EMPTY_ROOT);

    // get the root of the empty tree of depth 1
    let empty_root_1_depth = EmptySubtreeRoots::empty_hashes(1)[0];
    assert_eq!(empty_root_1_depth, SimpleSmt::<1>::EMPTY_ROOT);
}

// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------


417 src/merkle/smt/tests.rs Normal file
@@ -0,0 +1,417 @@
|
||||
use alloc::{collections::BTreeMap, vec::Vec};
|
||||
|
||||
use super::{
|
||||
build_subtree, InnerNode, LeafIndex, NodeIndex, PairComputations, SmtLeaf, SparseMerkleTree,
|
||||
SubtreeLeaf, SubtreeLeavesIter, COLS_PER_SUBTREE, SUBTREE_DEPTH,
|
||||
};
|
||||
use crate::{
|
||||
hash::rpo::RpoDigest,
|
||||
merkle::{Smt, SMT_DEPTH},
|
||||
Felt, Word, ONE,
|
||||
};
|
||||
|
||||
fn smtleaf_to_subtree_leaf(leaf: &SmtLeaf) -> SubtreeLeaf {
|
||||
SubtreeLeaf {
|
||||
col: leaf.index().index.value(),
|
||||
hash: leaf.hash(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sorted_pairs_to_leaves() {
|
||||
let entries: Vec<(RpoDigest, Word)> = vec![
|
||||
// Subtree 0.
|
||||
(RpoDigest::new([ONE, ONE, ONE, Felt::new(16)]), [ONE; 4]),
|
||||
(RpoDigest::new([ONE, ONE, ONE, Felt::new(17)]), [ONE; 4]),
|
||||
// Leaf index collision.
|
||||
(RpoDigest::new([ONE, ONE, Felt::new(10), Felt::new(20)]), [ONE; 4]),
|
||||
(RpoDigest::new([ONE, ONE, Felt::new(20), Felt::new(20)]), [ONE; 4]),
|
||||
// Subtree 1. Normal single leaf again.
|
||||
(RpoDigest::new([ONE, ONE, ONE, Felt::new(400)]), [ONE; 4]), // Subtree boundary.
|
||||
(RpoDigest::new([ONE, ONE, ONE, Felt::new(401)]), [ONE; 4]),
|
||||
// Subtree 2. Another normal leaf.
|
||||
(RpoDigest::new([ONE, ONE, ONE, Felt::new(1024)]), [ONE; 4]),
|
||||
];
|
||||
|
||||
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
|
||||
|
||||
let control_leaves: Vec<SmtLeaf> = {
|
||||
let mut entries_iter = entries.iter().cloned();
|
||||
let mut next_entry = || entries_iter.next().unwrap();
|
||||
let control_leaves = vec![
|
||||
// Subtree 0.
|
||||
SmtLeaf::Single(next_entry()),
|
||||
SmtLeaf::Single(next_entry()),
|
||||
SmtLeaf::new_multiple(vec![next_entry(), next_entry()]).unwrap(),
|
||||
// Subtree 1.
|
||||
SmtLeaf::Single(next_entry()),
|
||||
SmtLeaf::Single(next_entry()),
|
||||
// Subtree 2.
|
||||
SmtLeaf::Single(next_entry()),
|
||||
];
|
||||
assert_eq!(entries_iter.next(), None);
|
||||
control_leaves
|
||||
};
|
||||
|
||||
let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = {
|
||||
let mut control_leaves_iter = control_leaves.iter();
|
||||
let mut next_leaf = || control_leaves_iter.next().unwrap();
|
||||
|
||||
let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = [
|
||||
// Subtree 0.
|
||||
vec![next_leaf(), next_leaf(), next_leaf()],
|
||||
// Subtree 1.
|
||||
vec![next_leaf(), next_leaf()],
|
||||
// Subtree 2.
|
||||
vec![next_leaf()],
|
||||
]
|
||||
.map(|subtree| subtree.into_iter().map(smtleaf_to_subtree_leaf).collect())
|
||||
.to_vec();
|
||||
assert_eq!(control_leaves_iter.next(), None);
|
||||
control_subtree_leaves
|
||||
};
|
||||
|
||||
let subtrees: PairComputations<u64, SmtLeaf> = Smt::sorted_pairs_to_leaves(entries);
|
||||
// This will check that the hashes, columns, and subtree assignments all match.
|
||||
assert_eq!(subtrees.leaves, control_subtree_leaves);
|
||||
|
||||
// Flattening and re-separating out the leaves into subtrees should have the same result.
|
||||
let mut all_leaves: Vec<SubtreeLeaf> = subtrees.leaves.clone().into_iter().flatten().collect();
|
||||
let re_grouped: Vec<Vec<_>> = SubtreeLeavesIter::from_leaves(&mut all_leaves).collect();
|
||||
assert_eq!(subtrees.leaves, re_grouped);
|
||||
|
||||
// Then finally we might as well check the computed leaf nodes too.
|
||||
let control_leaves: BTreeMap<u64, SmtLeaf> = control
|
||||
.leaves()
|
||||
.map(|(index, value)| (index.index.value(), value.clone()))
|
||||
.collect();
|
||||
|
||||
for (column, test_leaf) in subtrees.nodes {
|
||||
if test_leaf.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let control_leaf = control_leaves
|
||||
.get(&column)
|
||||
.unwrap_or_else(|| panic!("no leaf node found for column {column}"));
|
||||
assert_eq!(control_leaf, &test_leaf);
|
||||
}
|
||||
}
|
||||
|
||||
// Helper for the below tests.
|
||||
fn generate_entries(pair_count: u64) -> Vec<(RpoDigest, Word)> {
|
||||
(0..pair_count)
|
||||
.map(|i| {
|
||||
let leaf_index = ((i as f64 / pair_count as f64) * (pair_count as f64)) as u64;
|
||||
let key = RpoDigest::new([ONE, ONE, Felt::new(i), Felt::new(leaf_index)]);
|
||||
let value = [ONE, ONE, ONE, Felt::new(i)];
|
||||
(key, value)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_single_subtree() {
|
||||
// A single subtree's worth of leaves.
|
||||
const PAIR_COUNT: u64 = COLS_PER_SUBTREE;
|
||||
|
||||
let entries = generate_entries(PAIR_COUNT);
|
||||
|
||||
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
|
||||
|
||||
// `entries` should already be sorted by nature of how we constructed it.
|
||||
let leaves = Smt::sorted_pairs_to_leaves(entries).leaves;
|
||||
let leaves = leaves.into_iter().next().unwrap();
|
||||
|
||||
let (first_subtree, subtree_root) = build_subtree(leaves, SMT_DEPTH, SMT_DEPTH);
|
||||
assert!(!first_subtree.is_empty());
|
||||
|
||||
// The inner nodes computed from that subtree should match the nodes in our control tree.
|
||||
for (index, node) in first_subtree.into_iter() {
|
||||
let control = control.get_inner_node(index);
|
||||
assert_eq!(
|
||||
control, node,
|
||||
"subtree-computed node at index {index:?} does not match control",
|
||||
);
|
||||
}
|
||||
|
||||
// The root returned should also match the equivalent node in the control tree.
|
||||
let control_root_index =
|
||||
NodeIndex::new(SMT_DEPTH - SUBTREE_DEPTH, subtree_root.col).expect("Valid root index");
|
||||
let control_root_node = control.get_inner_node(control_root_index);
|
||||
let control_hash = control_root_node.hash();
|
||||
assert_eq!(
|
||||
control_hash, subtree_root.hash,
|
||||
"Subtree-computed root at index {control_root_index:?} does not match control"
|
||||
);
|
||||
}
|
||||
|
||||
// Test that not just can we compute a subtree correctly, but we can feed the results of one
|
||||
// subtree into computing another. In other words, test that `build_subtree()` is correctly
|
||||
// composable.
|
||||
#[test]
|
||||
fn test_two_subtrees() {
|
||||
// Two subtrees' worth of leaves.
|
||||
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 2;
|
||||
|
||||
let entries = generate_entries(PAIR_COUNT);
|
||||
|
||||
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
|
||||
|
||||
let PairComputations { leaves, .. } = Smt::sorted_pairs_to_leaves(entries);
|
||||
// With two subtrees' worth of leaves, we should have exactly two subtrees.
|
||||
let [first, second]: [Vec<_>; 2] = leaves.try_into().unwrap();
|
||||
assert_eq!(first.len() as u64, PAIR_COUNT / 2);
|
||||
assert_eq!(first.len(), second.len());
|
||||
|
||||
let mut current_depth = SMT_DEPTH;
|
||||
let mut next_leaves: Vec<SubtreeLeaf> = Default::default();
|
||||
|
||||
let (first_nodes, first_root) = build_subtree(first, SMT_DEPTH, current_depth);
|
||||
next_leaves.push(first_root);
|
||||
|
||||
let (second_nodes, second_root) = build_subtree(second, SMT_DEPTH, current_depth);
|
||||
next_leaves.push(second_root);
|
||||
|
||||
// All new inner nodes + the new subtree-leaves should be 512, for one depth-cycle.
|
||||
let total_computed = first_nodes.len() + second_nodes.len() + next_leaves.len();
|
||||
assert_eq!(total_computed as u64, PAIR_COUNT);

    // Verify the computed nodes of both subtrees.
    let computed_nodes = first_nodes.clone().into_iter().chain(second_nodes);
    for (index, test_node) in computed_nodes {
        let control_node = control.get_inner_node(index);
        assert_eq!(
            control_node, test_node,
            "subtree-computed node at index {index:?} does not match control",
        );
    }

    current_depth -= SUBTREE_DEPTH;

    let (nodes, root_leaf) = build_subtree(next_leaves, SMT_DEPTH, current_depth);
    assert_eq!(nodes.len(), SUBTREE_DEPTH as usize);
    assert_eq!(root_leaf.col, 0);

    for (index, test_node) in nodes {
        let control_node = control.get_inner_node(index);
        assert_eq!(
            control_node, test_node,
            "subtree-computed node at index {index:?} does not match control",
        );
    }

    let index = NodeIndex::new(current_depth - SUBTREE_DEPTH, root_leaf.col).unwrap();
    let control_root = control.get_inner_node(index).hash();
    assert_eq!(control_root, root_leaf.hash, "Root mismatch");
}

#[test]
fn test_singlethreaded_subtrees() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;

    let entries = generate_entries(PAIR_COUNT);

    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();

    let PairComputations {
        leaves: mut leaf_subtrees,
        nodes: test_leaves,
    } = Smt::sorted_pairs_to_leaves(entries);

    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
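        // (Iteration-order note, assuming SMT_DEPTH = 64 and SUBTREE_DEPTH = 8 as elsewhere in
        // this module: `.rev()` makes this visit bottom-depths 64, 56, ..., 16, 8 — one subtree
        // "storey" per iteration, moving from the leaves up toward the root.)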
        // There's no flat_map_unzip(), so this is the best we can do.
        let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
            .into_iter()
            .enumerate()
            .map(|(i, subtree)| {
                // Pre-assertions.
                assert!(
                    subtree.is_sorted(),
                    "subtree {i} at bottom-depth {current_depth} is not sorted",
                );
                assert!(
                    !subtree.is_empty(),
                    "subtree {i} at bottom-depth {current_depth} is empty!",
                );

                // Do actual things.
                let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);

                // Post-assertions.
                for (&index, test_node) in nodes.iter() {
                    let control_node = control.get_inner_node(index);
                    assert_eq!(
                        test_node, &control_node,
                        "depth {} subtree {}: test node does not match control at index {:?}",
                        current_depth, i, index,
                    );
                }

                (nodes, subtree_root)
            })
            .unzip();

        // Update state between each depth iteration.

        leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        accumulated_nodes.extend(nodes.into_iter().flatten());

        assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
    }

    // Make sure the true leaves match, first checking length and then checking each individual
    // leaf.
    let control_leaves: BTreeMap<_, _> = control.leaves().collect();
    let control_leaves_len = control_leaves.len();
    let test_leaves_len = test_leaves.len();
    assert_eq!(test_leaves_len, control_leaves_len);
    for (col, ref test_leaf) in test_leaves {
        let index = LeafIndex::new_max_depth(col);
        let &control_leaf = control_leaves.get(&index).unwrap();
        assert_eq!(test_leaf, control_leaf, "test leaf at column {col} does not match control");
    }

    // Make sure the inner nodes match, checking length first and then each individual node.
    let control_nodes_len = control.inner_nodes().count();
    let test_nodes_len = accumulated_nodes.len();
    assert_eq!(test_nodes_len, control_nodes_len);
    for (index, test_node) in accumulated_nodes.clone() {
        let control_node = control.get_inner_node(index);
        assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
    }

    // After the last iteration of the above for loop, we should have the new root node in two
    // places: once in `accumulated_nodes`, and once as the "next leaves" return from
    // `build_subtree()`. So let's check both!

    let control_root = control.get_inner_node(NodeIndex::root());

    // That for loop should have left us with only one leaf subtree...
    let [leaf_subtree]: [Vec<_>; 1] = leaf_subtrees.try_into().unwrap();
    // which itself contains only one 'leaf'...
    let [root_leaf]: [SubtreeLeaf; 1] = leaf_subtree.try_into().unwrap();
    // which matches the expected root.
    assert_eq!(control.root(), root_leaf.hash);

    // Likewise `accumulated_nodes` should contain a node at the root index...
    assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
    // and it should match our actual root.
    let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
    assert_eq!(control_root, *test_root);
    // And of course the root we got from each place should match.
    assert_eq!(control.root(), root_leaf.hash);
}

/// The parallel version of `test_singlethreaded_subtrees()`.
#[test]
#[cfg(feature = "concurrent")]
fn test_multithreaded_subtrees() {
    use rayon::prelude::*;

    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;

    let entries = generate_entries(PAIR_COUNT);

    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();

    let PairComputations {
        leaves: mut leaf_subtrees,
        nodes: test_leaves,
    } = Smt::sorted_pairs_to_leaves(entries);

    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
        let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
            .into_par_iter()
            .enumerate()
            .map(|(i, subtree)| {
                // Pre-assertions.
                assert!(
                    subtree.is_sorted(),
                    "subtree {i} at bottom-depth {current_depth} is not sorted",
                );
                assert!(
                    !subtree.is_empty(),
                    "subtree {i} at bottom-depth {current_depth} is empty!",
                );

                let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);

                // Post-assertions.
                for (&index, test_node) in nodes.iter() {
                    let control_node = control.get_inner_node(index);
                    assert_eq!(
                        test_node, &control_node,
                        "depth {} subtree {}: test node does not match control at index {:?}",
                        current_depth, i, index,
                    );
                }

                (nodes, subtree_root)
            })
            .unzip();

        leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        accumulated_nodes.extend(nodes.into_iter().flatten());

        assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
    }

    // Make sure the true leaves match, checking length first and then each individual leaf.
    let control_leaves: BTreeMap<_, _> = control.leaves().collect();
    let control_leaves_len = control_leaves.len();
    let test_leaves_len = test_leaves.len();
    assert_eq!(test_leaves_len, control_leaves_len);
    for (col, ref test_leaf) in test_leaves {
        let index = LeafIndex::new_max_depth(col);
        let &control_leaf = control_leaves.get(&index).unwrap();
        assert_eq!(test_leaf, control_leaf);
    }

    // Make sure the inner nodes match, checking length first and then each individual node.
    let control_nodes_len = control.inner_nodes().count();
    let test_nodes_len = accumulated_nodes.len();
    assert_eq!(test_nodes_len, control_nodes_len);
    for (index, test_node) in accumulated_nodes.clone() {
        let control_node = control.get_inner_node(index);
        assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
    }

    // After the last iteration of the above for loop, we should have the new root node in two
    // places: once in `accumulated_nodes`, and once as the "next leaves" return from
    // `build_subtree()`. So let's check both!

    let control_root = control.get_inner_node(NodeIndex::root());

    // That for loop should have left us with only one leaf subtree...
    let [leaf_subtree]: [_; 1] = leaf_subtrees.try_into().unwrap();
    // which itself contains only one 'leaf'...
    let [root_leaf]: [_; 1] = leaf_subtree.try_into().unwrap();
    // which matches the expected root.
    assert_eq!(control.root(), root_leaf.hash);

    // Likewise `accumulated_nodes` should contain a node at the root index...
    assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
    // and it should match our actual root.
    let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
    assert_eq!(control_root, *test_root);
    // And of course the root we got from each place should match.
    assert_eq!(control.root(), root_leaf.hash);
}

#[test]
#[cfg(feature = "concurrent")]
fn test_with_entries_parallel() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;

    let entries = generate_entries(PAIR_COUNT);

    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let smt = Smt::with_entries(entries.clone()).unwrap();
    assert_eq!(smt.root(), control.root());
    assert_eq!(smt, control);
}
@@ -127,8 +127,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
     /// # Errors
     /// This method can return the following errors:
     /// - `RootNotInStore` if the `root` is not present in the store.
-    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
-    ///   the store.
+    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
+    ///   store.
     pub fn get_node(&self, root: RpoDigest, index: NodeIndex) -> Result<RpoDigest, MerkleError> {
         let mut hash = root;

@@ -136,7 +136,10 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
         self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;

         for i in (0..index.depth()).rev() {
-            let node = self.nodes.get(&hash).ok_or(MerkleError::NodeNotInStore(hash, index))?;
+            let node = self
+                .nodes
+                .get(&hash)
+                .ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;

             let bit = (index.value() >> i) & 1;
             hash = if bit == 0 { node.left } else { node.right }
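As a worked example of the bit-walk above (illustrative values, not taken from the diff): with `index.depth() == 3` and `index.value() == 0b101`, the loop reads bits 2, 1, 0 as 1, 0, 1, so the traversal descends right, then left, then right from the root to reach the target node.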
@@ -152,8 +155,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
     /// # Errors
     /// This method can return the following errors:
     /// - `RootNotInStore` if the `root` is not present in the store.
-    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
-    ///   the store.
+    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
+    ///   store.
     pub fn get_path(&self, root: RpoDigest, index: NodeIndex) -> Result<ValuePath, MerkleError> {
         let mut hash = root;
         let mut path = Vec::with_capacity(index.depth().into());

@@ -162,7 +165,10 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
         self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;

         for i in (0..index.depth()).rev() {
-            let node = self.nodes.get(&hash).ok_or(MerkleError::NodeNotInStore(hash, index))?;
+            let node = self
+                .nodes
+                .get(&hash)
+                .ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;

             let bit = (index.value() >> i) & 1;
             hash = if bit == 0 {

@@ -421,8 +427,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
     /// # Errors
     /// This method can return the following errors:
     /// - `RootNotInStore` if the `root` is not present in the store.
-    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
-    ///   the store.
+    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
+    ///   store.
     pub fn set_node(
         &mut self,
         mut root: RpoDigest,
@@ -1,4 +1,11 @@
+use assert_matches::assert_matches;
 use seq_macro::seq;
+#[cfg(feature = "std")]
+use {
+    super::{Deserializable, Serializable},
+    alloc::boxed::Box,
+    std::error::Error,
+};

 use super::{
     DefaultMerkleStore as MerkleStore, EmptySubtreeRoots, MerkleError, MerklePath, NodeIndex,
@@ -11,13 +18,6 @@ use crate::{
     Felt, Word, ONE, WORD_SIZE, ZERO,
 };

-#[cfg(feature = "std")]
-use {
-    super::{Deserializable, Serializable},
-    alloc::boxed::Box,
-    std::error::Error,
-};
-
 // TEST DATA
 // ================================================================================================
@@ -43,14 +43,14 @@ const VALUES8: [RpoDigest; 8] = [
 fn test_root_not_in_store() -> Result<(), MerkleError> {
     let mtree = MerkleTree::new(digests_to_words(&VALUES4))?;
     let store = MerkleStore::from(&mtree);
-    assert_eq!(
+    assert_matches!(
         store.get_node(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
-        Err(MerkleError::RootNotInStore(VALUES4[0])),
+        Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
         "Leaf 0 is not a root"
     );
-    assert_eq!(
+    assert_matches!(
         store.get_path(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
-        Err(MerkleError::RootNotInStore(VALUES4[0])),
+        Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
        "Leaf 0 is not a root"
     );
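A side note on the pattern this hunk introduces (an inference about motivation, not stated in the diff): `assert_matches!` checks the shape of a value against a pattern plus an optional `if` guard, so the assertion keeps compiling and reads well even if the error type stops implementing `PartialEq`. A tiny self-contained illustration:

use assert_matches::assert_matches;

fn shape_check() {
    // The pattern binds `msg`, and the guard refines the match.
    let result: Result<u32, String> = Err("root not in store".to_string());
    assert_matches!(result, Err(msg) if msg.contains("root"));
}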
@@ -65,46 +65,46 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
     // STORE LEAVES ARE CORRECT -------------------------------------------------------------------
     // checks the leaves in the store corresponds to the expected values
     assert_eq!(
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)),
-        Ok(VALUES4[0]),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
+        VALUES4[0],
         "node 0 must be in the tree"
     );
     assert_eq!(
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)),
-        Ok(VALUES4[1]),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
+        VALUES4[1],
         "node 1 must be in the tree"
     );
     assert_eq!(
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)),
-        Ok(VALUES4[2]),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
+        VALUES4[2],
         "node 2 must be in the tree"
     );
     assert_eq!(
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)),
-        Ok(VALUES4[3]),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
+        VALUES4[3],
         "node 3 must be in the tree"
     );

     // STORE LEAVES MATCH TREE --------------------------------------------------------------------
     // sanity check the values returned by the store and the tree
     assert_eq!(
-        mtree.get_node(NodeIndex::make(mtree.depth(), 0)),
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)),
+        mtree.get_node(NodeIndex::make(mtree.depth(), 0)).unwrap(),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
         "node 0 must be the same for both MerkleTree and MerkleStore"
     );
     assert_eq!(
-        mtree.get_node(NodeIndex::make(mtree.depth(), 1)),
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)),
+        mtree.get_node(NodeIndex::make(mtree.depth(), 1)).unwrap(),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
         "node 1 must be the same for both MerkleTree and MerkleStore"
     );
     assert_eq!(
-        mtree.get_node(NodeIndex::make(mtree.depth(), 2)),
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)),
+        mtree.get_node(NodeIndex::make(mtree.depth(), 2)).unwrap(),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
         "node 2 must be the same for both MerkleTree and MerkleStore"
     );
     assert_eq!(
-        mtree.get_node(NodeIndex::make(mtree.depth(), 3)),
-        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)),
+        mtree.get_node(NodeIndex::make(mtree.depth(), 3)).unwrap(),
+        store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
         "node 3 must be the same for both MerkleTree and MerkleStore"
     );
@@ -116,8 +116,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        mtree.get_path(NodeIndex::make(mtree.depth(), 0)),
-        Ok(result.path),
+        mtree.get_path(NodeIndex::make(mtree.depth(), 0)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );

@@ -127,8 +127,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        mtree.get_path(NodeIndex::make(mtree.depth(), 1)),
-        Ok(result.path),
+        mtree.get_path(NodeIndex::make(mtree.depth(), 1)).unwrap(),
+        result.path,
         "merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
     );

@@ -138,8 +138,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        mtree.get_path(NodeIndex::make(mtree.depth(), 2)),
-        Ok(result.path),
+        mtree.get_path(NodeIndex::make(mtree.depth(), 2)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );

@@ -149,8 +149,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        mtree.get_path(NodeIndex::make(mtree.depth(), 3)),
-        Ok(result.path),
+        mtree.get_path(NodeIndex::make(mtree.depth(), 3)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );
@@ -241,56 +241,56 @@ fn test_sparse_merkle_tree() -> Result<(), MerkleError> {
     // STORE LEAVES ARE CORRECT ==============================================================
     // checks the leaves in the store corresponds to the expected values
     assert_eq!(
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)),
-        Ok(VALUES4[0]),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
+        VALUES4[0],
         "node 0 must be in the tree"
     );
     assert_eq!(
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)),
-        Ok(VALUES4[1]),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
+        VALUES4[1],
         "node 1 must be in the tree"
     );
     assert_eq!(
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)),
-        Ok(VALUES4[2]),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
+        VALUES4[2],
         "node 2 must be in the tree"
     );
     assert_eq!(
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)),
-        Ok(VALUES4[3]),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
+        VALUES4[3],
         "node 3 must be in the tree"
     );
     assert_eq!(
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)),
-        Ok(RpoDigest::default()),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
+        RpoDigest::default(),
         "unmodified node 4 must be ZERO"
     );

     // STORE LEAVES MATCH TREE ===============================================================
     // sanity check the values returned by the store and the tree
     assert_eq!(
-        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 0)),
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)),
+        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
         "node 0 must be the same for both SparseMerkleTree and MerkleStore"
     );
     assert_eq!(
-        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 1)),
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)),
+        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
         "node 1 must be the same for both SparseMerkleTree and MerkleStore"
     );
     assert_eq!(
-        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 2)),
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)),
+        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
         "node 2 must be the same for both SparseMerkleTree and MerkleStore"
     );
     assert_eq!(
-        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 3)),
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)),
+        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
         "node 3 must be the same for both SparseMerkleTree and MerkleStore"
     );
     assert_eq!(
-        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 4)),
-        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)),
+        smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
+        store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
         "node 4 must be the same for both SparseMerkleTree and MerkleStore"
     );
@@ -386,46 +386,46 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
     // STORE LEAVES ARE CORRECT ==============================================================
     // checks the leaves in the store corresponds to the expected values
    assert_eq!(
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)),
-        Ok(VALUES4[0]),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
+        VALUES4[0],
         "node 0 must be in the pmt"
     );
     assert_eq!(
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)),
-        Ok(VALUES4[1]),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
+        VALUES4[1],
         "node 1 must be in the pmt"
     );
     assert_eq!(
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)),
-        Ok(VALUES4[2]),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
+        VALUES4[2],
         "node 2 must be in the pmt"
     );
     assert_eq!(
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)),
-        Ok(VALUES4[3]),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
+        VALUES4[3],
         "node 3 must be in the pmt"
     );

     // STORE LEAVES MATCH PMT ================================================================
     // sanity check the values returned by the store and the pmt
     assert_eq!(
-        pmt.get_node(NodeIndex::make(pmt.max_depth(), 0)),
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)),
+        pmt.get_node(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
         "node 0 must be the same for both PartialMerkleTree and MerkleStore"
     );
     assert_eq!(
-        pmt.get_node(NodeIndex::make(pmt.max_depth(), 1)),
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)),
+        pmt.get_node(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
         "node 1 must be the same for both PartialMerkleTree and MerkleStore"
     );
     assert_eq!(
-        pmt.get_node(NodeIndex::make(pmt.max_depth(), 2)),
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)),
+        pmt.get_node(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
         "node 2 must be the same for both PartialMerkleTree and MerkleStore"
     );
     assert_eq!(
-        pmt.get_node(NodeIndex::make(pmt.max_depth(), 3)),
-        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)),
+        pmt.get_node(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
+        store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
         "node 3 must be the same for both PartialMerkleTree and MerkleStore"
     );
@@ -437,8 +437,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        pmt.get_path(NodeIndex::make(pmt.max_depth(), 0)),
-        Ok(result.path),
+        pmt.get_path(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );

@@ -448,8 +448,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        pmt.get_path(NodeIndex::make(pmt.max_depth(), 1)),
-        Ok(result.path),
+        pmt.get_path(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
+        result.path,
         "merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
     );

@@ -459,8 +459,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        pmt.get_path(NodeIndex::make(pmt.max_depth(), 2)),
-        Ok(result.path),
+        pmt.get_path(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );

@@ -470,8 +470,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
         "Value for merkle path at index 0 must match leaf value"
     );
     assert_eq!(
-        pmt.get_path(NodeIndex::make(pmt.max_depth(), 3)),
-        Ok(result.path),
+        pmt.get_path(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
+        result.path,
         "merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
     );
@@ -499,7 +499,7 @@ fn wont_open_to_different_depth_root() {
     let store = MerkleStore::from(&mtree);
     let index = NodeIndex::root();
     let err = store.get_node(root, index).err().unwrap();
-    assert_eq!(err, MerkleError::RootNotInStore(root));
+    assert_matches!(err, MerkleError::RootNotInStore(err_root) if err_root == root);
 }

 #[test]
@@ -538,7 +538,7 @@ fn test_set_node() -> Result<(), MerkleError> {
     let value = int_to_node(42);
     let index = NodeIndex::make(mtree.depth(), 0);
     let new_root = store.set_node(mtree.root(), index, value)?.root;
-    assert_eq!(store.get_node(new_root, index), Ok(value), "Value must have changed");
+    assert_eq!(store.get_node(new_root, index).unwrap(), value, "value must have changed");

     Ok(())
 }
@@ -614,7 +614,7 @@ fn node_path_should_be_truncated_by_midtier_insert() {
     let path = store.get_path(root, index).unwrap().path;
     assert_eq!(node, result);
     assert_eq!(path.depth(), depth);
-    assert!(path.verify(index.value(), result, &root));
+    assert!(path.verify(index.value(), result, &root).is_ok());

     // flip the first bit of the key and insert the second node on a different depth
     let key = key ^ (1 << 63);
@@ -627,7 +627,7 @@ fn node_path_should_be_truncated_by_midtier_insert() {
     let path = store.get_path(root, index).unwrap().path;
     assert_eq!(node, result);
     assert_eq!(path.depth(), depth);
-    assert!(path.verify(index.value(), result, &root));
+    assert!(path.verify(index.value(), result, &root).is_ok());

     // attempt to fetch a path of the second node to depth 64
     // should fail because the previously inserted node will remove its sub-tree from the set
@@ -746,7 +746,7 @@ fn get_leaf_depth_works_with_depth_8() {
     // duplicate the tree on `a` and assert the depth is short-circuited by such sub-tree
     let index = NodeIndex::new(8, a).unwrap();
     root = store.set_node(root, index, root).unwrap().root;
-    assert_eq!(Err(MerkleError::DepthTooBig(9)), store.get_leaf_depth(root, 8, a));
+    assert_matches!(store.get_leaf_depth(root, 8, a).unwrap_err(), MerkleError::DepthTooBig(9));
 }

 #[test]
@@ -1,10 +1,12 @@
+use alloc::{string::ToString, vec::Vec};
+
+use rand_core::impls;
+
 use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
 use crate::{
     hash::rpo::{Rpo256, RpoDigest},
     utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
 };
-use alloc::{string::ToString, vec::Vec};
-use rand_core::impls;

 // CONSTANTS
 // ================================================================================================
@@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpo256::RATE_RANGE.end - Rpo256::RATE_RANGE.star
 /// described in <https://eprint.iacr.org/2011/499.pdf>.
 ///
 /// The simplification is related to the following facts:
-/// 1. A call to the reseed method implies one and only one call to the permutation function.
-///    This is possible because in our case we never reseed with more than 4 field elements.
+/// 1. A call to the reseed method implies one and only one call to the permutation function. This
+///    is possible because in our case we never reseed with more than 4 field elements.
 /// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
 ///    material.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
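A minimal usage sketch of the coin described above (hedged: `RpoRandomCoin::new` and the `FeltRng` drawing methods are used as they appear elsewhere in this crate, but exact paths and signatures may differ between versions):

use miden_crypto::{
    rand::{FeltRng, RpoRandomCoin},
    Felt, Word, ZERO,
};

fn draw_some_randomness() -> (Felt, Word) {
    // Seed the coin with a 4-element word. Per the doc comment above, a reseed
    // never absorbs more than 4 field elements, so it costs exactly one permutation.
    let seed: Word = [ZERO; 4];
    let mut coin = RpoRandomCoin::new(seed);

    // Draw a single field element, then a full word, from the squeezed rate.
    let element: Felt = coin.draw_element();
    let word: Word = coin.draw_word();
    (element, word)
}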
@@ -143,8 +145,10 @@ impl RandomCoin for RpoRandomCoin {
         self.state[RATE_START] += nonce;
         Rpo256::apply_permutation(&mut self.state);

-        // reset the buffer
-        self.current = RATE_START;
+        // reset the buffer and move the next random element pointer to the second rate element.
+        // this is done as the first rate element will be "biased" via the provided `nonce` to
+        // contain some number of leading zeros.
+        self.current = RATE_START + 1;

         // determine how many bits are needed to represent valid values in the domain
         let v_mask = (domain_size - 1) as u64;
@@ -170,6 +174,36 @@ impl RandomCoin for RpoRandomCoin {

         Ok(values)
     }
+
+    fn reseed_with_salt(
+        &mut self,
+        data: <Self::Hasher as winter_crypto::Hasher>::Digest,
+        salt: Option<<Self::Hasher as winter_crypto::Hasher>::Digest>,
+    ) {
+        // Reset buffer
+        self.current = RATE_START;
+
+        // Add the new seed material to the first half of the rate portion of the RPO state
+        let data: Word = data.into();
+
+        self.state[RATE_START] += data[0];
+        self.state[RATE_START + 1] += data[1];
+        self.state[RATE_START + 2] += data[2];
+        self.state[RATE_START + 3] += data[3];
+
+        if let Some(salt) = salt {
+            // Add the salt to the second half of the rate portion of the RPO state
+            let data: Word = salt.into();
+
+            self.state[RATE_START + 4] += data[0];
+            self.state[RATE_START + 5] += data[1];
+            self.state[RATE_START + 6] += data[2];
+            self.state[RATE_START + 7] += data[3];
+        }
+
+        // Absorb
+        Rpo256::apply_permutation(&mut self.state);
+    }
 }

 // FELT RNG IMPLEMENTATION
@@ -1,10 +1,12 @@
+use alloc::{string::ToString, vec::Vec};
+
+use rand_core::impls;
+
 use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
 use crate::{
     hash::rpx::{Rpx256, RpxDigest},
     utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
 };
-use alloc::{string::ToString, vec::Vec};
-use rand_core::impls;

 // CONSTANTS
 // ================================================================================================
@@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpx256::RATE_RANGE.end - Rpx256::RATE_RANGE.star
 /// described in <https://eprint.iacr.org/2011/499.pdf>.
 ///
 /// The simplification is related to the following facts:
-/// 1. A call to the reseed method implies one and only one call to the permutation function.
-///    This is possible because in our case we never reseed with more than 4 field elements.
+/// 1. A call to the reseed method implies one and only one call to the permutation function. This
+///    is possible because in our case we never reseed with more than 4 field elements.
 /// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
 ///    material.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -170,6 +172,36 @@ impl RandomCoin for RpxRandomCoin {

         Ok(values)
     }
+
+    fn reseed_with_salt(
+        &mut self,
+        data: <Self::Hasher as winter_crypto::Hasher>::Digest,
+        salt: Option<<Self::Hasher as winter_crypto::Hasher>::Digest>,
+    ) {
+        // Reset buffer
+        self.current = RATE_START;
+
+        // Add the new seed material to the first half of the rate portion of the RPO state
+        let data: Word = data.into();
+
+        self.state[RATE_START] += data[0];
+        self.state[RATE_START + 1] += data[1];
+        self.state[RATE_START + 2] += data[2];
+        self.state[RATE_START + 3] += data[3];
+
+        if let Some(salt) = salt {
+            // Add the salt to the second half of the rate portion of the RPO state
+            let data: Word = salt.into();
+
+            self.state[RATE_START + 4] += data[0];
+            self.state[RATE_START + 5] += data[1];
+            self.state[RATE_START + 6] += data[2];
+            self.state[RATE_START + 7] += data[3];
+        }
+
+        // Absorb
+        Rpx256::apply_permutation(&mut self.state);
+    }
 }

 // FELT RNG IMPLEMENTATION
@@ -325,7 +325,8 @@ mod tests {
     let mut map = RecordingMap::new(ITEMS.to_vec());
     assert!(map.iter().all(|(x, y)| ITEMS.contains(&(*x, *y))));

-    // when inserting entry with key that already exists the iterator should return the new value
+    // when inserting entry with key that already exists the iterator should return the new
+    // value
     let new_value = 5;
     map.insert(4, new_value);
     assert_eq!(map.iter().count(), ITEMS.len());
@@ -1,7 +1,9 @@
 //! Utilities used in this crate which can also be generally useful downstream.

 use alloc::string::String;
-use core::fmt::{self, Display, Write};
+use core::fmt::{self, Write};
+
+use thiserror::Error;

 use super::Word;
@@ -46,36 +48,20 @@ pub fn bytes_to_hex_string<const N: usize>(data: [u8; N]) -> String {
 }

 /// Defines errors which can occur during parsing of hexadecimal strings.
-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub enum HexParseError {
+    #[error(
+        "expected hex data to have length {expected}, including the 0x prefix, found {actual}"
+    )]
     InvalidLength { expected: usize, actual: usize },
+    #[error("hex encoded data must start with 0x prefix")]
     MissingPrefix,
+    #[error("hex encoded data must contain only characters [a-zA-Z0-9]")]
     InvalidChar,
+    #[error("hex encoded values of a Digest must be inside the field modulus")]
     OutOfRange,
 }

-impl Display for HexParseError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self {
-            HexParseError::InvalidLength { expected, actual } => {
-                write!(f, "Expected hex data to have length {expected}, including the 0x prefix. Got {actual}")
-            }
-            HexParseError::MissingPrefix => {
-                write!(f, "Hex encoded data must start with 0x prefix")
-            }
-            HexParseError::InvalidChar => {
-                write!(f, "Hex encoded data must contain characters [a-zA-Z0-9]")
-            }
-            HexParseError::OutOfRange => {
-                write!(f, "Hex encoded values of an RpoDigest must be inside the field modulus")
-            }
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for HexParseError {}
-
 /// Parses a hex string into an array of bytes of known size.
 pub fn hex_to_bytes<const N: usize>(value: &str) -> Result<[u8; N], HexParseError> {
     let expected: usize = (N * 2) + 2;
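A quick sketch of what the `thiserror` migration above buys (illustrative; it assumes `hex_to_bytes` and `HexParseError` are reachable at these paths, which may differ): the derived `Display` impl now produces the `#[error(...)]` strings, and error shapes can be asserted directly.

use miden_crypto::utils::{hex_to_bytes, HexParseError};

fn demo() {
    // "0x" plus 8 hex chars decodes into exactly 4 bytes.
    let bytes: [u8; 4] = hex_to_bytes("0xdeadbeef").unwrap();
    assert_eq!(bytes, [0xde, 0xad, 0xbe, 0xef]);

    // Too-short input surfaces as InvalidLength; with N = 4 the expected
    // length is N * 2 + 2 = 10, matching the computation shown above.
    let err = hex_to_bytes::<4>("0xdead").unwrap_err();
    assert!(matches!(err, HexParseError::InvalidLength { expected: 10, actual: 6 }));
}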