Browse Source

Merge pull request #323 from 0xPolygonMiden/phklive-consistent-ci

Update `Makefile` and `CI`
al-update-winterfell
Paul-Henry Kajfasz 8 months ago
committed by GitHub
parent
commit
f4a9d5b027
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
60 changed files with 623 additions and 570 deletions
  1. +3
    -0
      .config/nextest.toml
  2. +25
    -0
      .github/workflows/build.yml
  3. +23
    -0
      .github/workflows/changelog.yml
  4. +0
    -31
      .github/workflows/doc.yml
  5. +33
    -46
      .github/workflows/lint.yml
  6. +0
    -32
      .github/workflows/no-std.yml
  7. +10
    -16
      .github/workflows/test.yml
  8. +31
    -40
      .pre-commit-config.yaml
  9. +43
    -42
      CHANGELOG.md
  10. +86
    -0
      Makefile
  11. +0
    -86
      Makefile.toml
  12. +49
    -27
      README.md
  13. +0
    -1
      rust-toolchain
  14. +5
    -0
      rust-toolchain.toml
  15. +13
    -11
      rustfmt.toml
  16. +21
    -0
      scripts/check-changelog.sh
  17. +6
    -4
      scripts/check-rust-version.sh
  18. +3
    -1
      src/dsa/rpo_falcon512/hash_to_point.rs
  19. +2
    -1
      src/dsa/rpo_falcon512/keys/mod.rs
  20. +6
    -5
      src/dsa/rpo_falcon512/keys/public_key.rs
  21. +8
    -7
      src/dsa/rpo_falcon512/keys/secret_key.rs
  22. +8
    -7
      src/dsa/rpo_falcon512/math/ffsampling.rs
  23. +21
    -29
      src/dsa/rpo_falcon512/math/fft.rs
  24. +3
    -1
      src/dsa/rpo_falcon512/math/field.rs
  25. +7
    -5
      src/dsa/rpo_falcon512/math/mod.rs
  26. +14
    -8
      src/dsa/rpo_falcon512/math/polynomial.rs
  27. +15
    -14
      src/dsa/rpo_falcon512/math/samplerz.rs
  28. +5
    -3
      src/dsa/rpo_falcon512/mod.rs
  29. +6
    -4
      src/dsa/rpo_falcon512/signature.rs
  30. +2
    -1
      src/hash/blake/tests.rs
  31. +36
    -33
      src/hash/rescue/arch/x86_64_avx2.rs
  32. +6
    -5
      src/hash/rescue/mds/freq.rs
  33. +1
    -0
      src/hash/rescue/rpo/digest.rs
  34. +4
    -5
      src/hash/rescue/rpo/mod.rs
  35. +4
    -3
      src/hash/rescue/rpo/tests.rs
  36. +1
    -0
      src/hash/rescue/rpx/digest.rs
  37. +6
    -4
      src/hash/rescue/rpx/mod.rs
  38. +5
    -5
      src/merkle/error.rs
  39. +2
    -1
      src/merkle/mmr/delta.rs
  40. +2
    -2
      src/merkle/mmr/error.rs
  41. +4
    -2
      src/merkle/mmr/full.rs
  42. +4
    -4
      src/merkle/mmr/mod.rs
  43. +7
    -5
      src/merkle/mmr/partial.rs
  44. +11
    -11
      src/merkle/mmr/peaks.rs
  45. +2
    -1
      src/merkle/mmr/tests.rs
  46. +4
    -2
      src/merkle/partial_mt/tests.rs
  47. +5
    -5
      src/merkle/smt/full/error.rs
  48. +20
    -19
      src/merkle/smt/full/leaf.rs
  49. +8
    -7
      src/merkle/smt/full/mod.rs
  50. +3
    -2
      src/merkle/smt/full/proof.rs
  51. +2
    -1
      src/merkle/smt/full/tests.rs
  52. +2
    -1
      src/merkle/smt/mod.rs
  53. +2
    -1
      src/merkle/smt/simple/mod.rs
  54. +2
    -1
      src/merkle/smt/simple/tests.rs
  55. +6
    -6
      src/merkle/store/mod.rs
  56. +6
    -7
      src/merkle/store/tests.rs
  57. +6
    -4
      src/rand/rpo.rs
  58. +6
    -4
      src/rand/rpx.rs
  59. +4
    -3
      src/utils/kv_map.rs
  60. +4
    -4
      src/utils/mod.rs

+ 3
- 0
.config/nextest.toml

@ -0,0 +1,3 @@
[profile.default]
failure-output = "immediate-final"
fail-fast = false

+ 25
- 0
.github/workflows/build.yml

@ -0,0 +1,25 @@
# Runs build related jobs.
name: build
on:
push:
branches: [main, next]
pull_request:
types: [opened, reopened, synchronize]
jobs:
no-std:
name: Build for no-std
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
toolchain: [stable, nightly]
steps:
- uses: actions/checkout@main
- name: Build for no-std
run: |
rustup update --no-self-update ${{ matrix.toolchain }}
rustup target add wasm32-unknown-unknown
make build-no-std

+ 23
- 0
.github/workflows/changelog.yml

@ -0,0 +1,23 @@
# Runs changelog related jobs.
# CI job heavily inspired by: https://github.com/tarides/changelog-check-action
name: changelog
on:
pull_request:
types: [opened, reopened, synchronize, labeled, unlabeled]
jobs:
changelog:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@main
with:
fetch-depth: 0
- name: Check for changes in changelog
env:
BASE_REF: ${{ github.event.pull_request.base.ref }}
NO_CHANGELOG_LABEL: ${{ contains(github.event.pull_request.labels.*.name, 'no changelog') }}
run: ./scripts/check-changelog.sh "CHANGELOG.md"
shell: bash

+ 0
- 31
.github/workflows/doc.yml

@ -1,31 +0,0 @@
# Runs documentation related jobs.
name: doc
on:
push:
branches:
- main
pull_request:
types: [opened, reopened, synchronize]
jobs:
docs:
name: Verify the docs on ${{matrix.toolchain}}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
toolchain: [stable]
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install rust
uses: actions-rs/toolchain@v1
with:
toolchain: ${{matrix.toolchain}}
override: true
- uses: davidB/rust-cargo-make@v1
- name: cargo make - doc
run: cargo make doc

+ 33
- 46
.github/workflows/lint.yml

@ -4,63 +4,50 @@ name: lint
on:
push:
branches:
- main
branches: [main, next]
pull_request:
types: [opened, reopened, synchronize]
jobs:
version:
name: check rust version consistency
clippy:
name: clippy nightly on ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
profile: minimal
override: true
- name: check rust versions
run: ./scripts/check-rust-version.sh
- uses: actions/checkout@main
- name: Clippy
run: |
rustup update --no-self-update nightly
rustup +nightly component add clippy
make clippy
rustfmt:
name: rustfmt ${{matrix.toolchain}} on ${{matrix.os}}
runs-on: ${{matrix.os}}-latest
strategy:
fail-fast: false
matrix:
toolchain: [nightly]
os: [ubuntu]
name: rustfmt check nightly on ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install minimal Rust with rustfmt
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{matrix.toolchain}}
components: rustfmt
override: true
- uses: davidB/rust-cargo-make@v1
- name: cargo make - format-check
run: cargo make format-check
- uses: actions/checkout@main
- name: Rustfmt
run: |
rustup update --no-self-update nightly
rustup +nightly component add rustfmt
make format-check
clippy:
name: clippy ${{matrix.toolchain}} on ${{matrix.os}}
runs-on: ${{matrix.os}}-latest
strategy:
fail-fast: false
matrix:
toolchain: [stable]
os: [ubuntu]
doc:
name: doc stable on ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install minimal Rust with clippy
uses: actions-rs/toolchain@v1
- uses: actions/checkout@main
- name: Build docs
run: |
rustup update --no-self-update stable
make doc
version:
name: check rust version consistency
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@main
with:
profile: minimal
toolchain: ${{matrix.toolchain}}
components: clippy
override: true
- uses: davidB/rust-cargo-make@v1
- name: cargo make - clippy
run: cargo make clippy
- name: check rust versions
run: ./scripts/check-rust-version.sh

+ 0
- 32
.github/workflows/no-std.yml

@ -1,32 +0,0 @@
# Runs no-std related jobs.
name: no-std
on:
push:
branches:
- main
pull_request:
types: [opened, reopened, synchronize]
jobs:
no-std:
name: build ${{matrix.toolchain}} no-std for wasm32-unknown-unknown
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
toolchain: [stable, nightly]
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install rust
uses: actions-rs/toolchain@v1
with:
toolchain: ${{matrix.toolchain}}
override: true
- run: rustup target add wasm32-unknown-unknown
- uses: davidB/rust-cargo-make@v1
- name: cargo make - build-no-std
run: cargo make build-no-std

+ 10
- 16
.github/workflows/test.yml

@ -1,34 +1,28 @@
# Runs testing related jobs
# Runs test related jobs.
name: test
on:
push:
branches:
- main
branches: [main, next]
pull_request:
types: [opened, reopened, synchronize]
jobs:
test:
name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.features}}
name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.args}}
runs-on: ${{matrix.os}}-latest
strategy:
fail-fast: false
matrix:
toolchain: [stable, nightly]
os: [ubuntu]
features: ["test", "test-no-default-features"]
args: [default, no-std]
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install rust
uses: actions-rs/toolchain@v1
with:
toolchain: ${{matrix.toolchain}}
override: true
- uses: davidB/rust-cargo-make@v1
- name: cargo make - test
run: cargo make ${{matrix.features}}
- uses: actions/checkout@main
- uses: taiki-e/install-action@nextest
- name: Perform tests
run: |
rustup update --no-self-update ${{matrix.toolchain}}
make test-${{matrix.args}}

+ 31
- 40
.pre-commit-config.yaml

@ -1,43 +1,34 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-json
- id: check-toml
- id: pretty-format-json
- id: check-added-large-files
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: detect-private-key
- repo: https://github.com/hackaugusto/pre-commit-cargo
rev: v1.0.0
hooks:
# Allows cargo fmt to modify the source code prior to the commit
- id: cargo
name: Cargo fmt
args: ["+stable", "fmt", "--all"]
stages: [commit]
# Requires code to be properly formatted prior to pushing upstream
- id: cargo
name: Cargo fmt --check
args: ["+stable", "fmt", "--all", "--check"]
stages: [push, manual]
- id: cargo
name: Cargo check --all-targets
args: ["+stable", "check", "--all-targets"]
- id: cargo
name: Cargo check --all-targets --no-default-features
args: ["+stable", "check", "--all-targets", "--no-default-features"]
- id: cargo
name: Cargo check --all-targets --features default,std,serde
args: ["+stable", "check", "--all-targets", "--features", "default,std,serde"]
# Unlike fmt, clippy will not be automatically applied
- id: cargo
name: Cargo clippy
args: ["+nightly", "clippy", "--workspace", "--", "--deny", "clippy::all", "--deny", "warnings"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-json
- id: check-toml
- id: pretty-format-json
- id: check-added-large-files
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: detect-private-key
- repo: local
hooks:
- id: lint
name: Make lint
stages: [commit]
language: system
entry: make lint
- id: doc
name: Make doc
stages: [commit]
language: system
entry: make doc
- id: check
name: Make check
stages: [commit]
language: system
entry: make check

+ 43
- 42
CHANGELOG.md

@ -2,88 +2,89 @@
- [BREAKING]: renamed `Mmr::open()` into `Mmr::open_at()` and `Mmr::peaks()` into `Mmr::peaks_at()` (#234).
- Added `Mmr::open()` and `Mmr::peaks()` which rely on `Mmr::open_at()` and `Mmr::peaks_at()` respectively (#234).
- Standardised CI and Makefile across Miden repos (#323).
## 0.10.0 (2024-08-06)
* Added more `RpoDigest` and `RpxDigest` conversions (#311).
* [BREAKING] Migrated to Winterfell v0.9 (#315).
* Fixed encoding of Falcon secret key (#319).
- Added more `RpoDigest` and `RpxDigest` conversions (#311).
- [BREAKING] Migrated to Winterfell v0.9 (#315).
- Fixed encoding of Falcon secret key (#319).
## 0.9.3 (2024-04-24)
* Added `RpxRandomCoin` struct (#307).
- Added `RpxRandomCoin` struct (#307).
## 0.9.2 (2024-04-21)
* Implemented serialization for the `Smt` struct (#304).
* Fixed a bug in Falcon signature generation (#305).
- Implemented serialization for the `Smt` struct (#304).
- Fixed a bug in Falcon signature generation (#305).
## 0.9.1 (2024-04-02)
* Added `num_leaves()` method to `SimpleSmt` (#302).
- Added `num_leaves()` method to `SimpleSmt` (#302).
## 0.9.0 (2024-03-24)
* [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
* [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
* [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).
- [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
- [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
- [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).
## 0.8.4 (2024-03-17)
* Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).
- Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).
## 0.8.3 (2024-03-17)
* Re-added unintentionally removed re-exported liballoc macros (#292).
- Re-added unintentionally removed re-exported liballoc macros (#292).
## 0.8.2 (2024-03-17)
* Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).
- Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).
## 0.8.1 (2024-02-21)
* Fixed clippy warnings (#280)
- Fixed clippy warnings (#280)
## 0.8.0 (2024-02-14)
* Implemented the `PartialMmr` data structure (#195).
* Implemented RPX hash function (#201).
* Added `FeltRng` and `RpoRandomCoin` (#237).
* Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
* Added `inner_nodes()` method to `PartialMmr` (#238).
* Improved `PartialMmr::apply_delta()` (#242).
* Refactored `SimpleSmt` struct (#245).
* Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
* Updated Winterfell dependency to v0.8 (#275).
- Implemented the `PartialMmr` data structure (#195).
- Implemented RPX hash function (#201).
- Added `FeltRng` and `RpoRandomCoin` (#237).
- Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
- Added `inner_nodes()` method to `PartialMmr` (#238).
- Improved `PartialMmr::apply_delta()` (#242).
- Refactored `SimpleSmt` struct (#245).
- Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
- Updated Winterfell dependency to v0.8 (#275).
## 0.7.1 (2023-10-10)
* Fixed RPO Falcon signature build on Windows.
- Fixed RPO Falcon signature build on Windows.
## 0.7.0 (2023-10-05)
* Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
* Implemented clearing of nodes in `TieredSmt` (#173).
* Added ability to generate inclusion proofs for `TieredSmt` (#174).
* Implemented Falcon DSA (#179).
* Added conditional `serde`` support for various structs (#180).
* Implemented benchmarking for `TieredSmt` (#182).
* Added more leaf traversal methods for `MerkleStore` (#185).
* Added SVE acceleration for RPO hash function (#189).
- Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
- Implemented clearing of nodes in `TieredSmt` (#173).
- Added ability to generate inclusion proofs for `TieredSmt` (#174).
- Implemented Falcon DSA (#179).
- Added conditional `serde` support for various structs (#180).
- Implemented benchmarking for `TieredSmt` (#182).
- Added more leaf traversal methods for `MerkleStore` (#185).
- Added SVE acceleration for RPO hash function (#189).
## 0.6.0 (2023-06-25)
* [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
* [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
* Added initial implementation of `PartialMerkleTree` (#156).
- [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
- [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
- Added initial implementation of `PartialMerkleTree` (#156).
## 0.5.0 (2023-05-26)
* Implemented `TieredSmt` (#152, #153).
* Implemented ability to extract a subset of a `MerkleStore` (#151).
* Cleaned up `SimpleSmt` interface (#149).
* Decoupled hashing and padding of peaks in `Mmr` (#148).
* Added `inner_nodes()` to `MerkleStore` (#146).
- Implemented `TieredSmt` (#152, #153).
- Implemented ability to extract a subset of a `MerkleStore` (#151).
- Cleaned up `SimpleSmt` interface (#149).
- Decoupled hashing and padding of peaks in `Mmr` (#148).
- Added `inner_nodes()` to `MerkleStore` (#146).
## 0.4.0 (2023-04-21)
@ -131,6 +132,6 @@
- Initial release on crates.io containing the cryptographic primitives used in Miden VM and the Miden Rollup.
- Hash module with the BLAKE3 and Rescue Prime Optimized hash functions.
- BLAKE3 is implemented with 256-bit, 192-bit, or 160-bit output.
- RPO is implemented with 256-bit output.
- BLAKE3 is implemented with 256-bit, 192-bit, or 160-bit output.
- RPO is implemented with 256-bit output.
- Merkle module, with a set of data structures related to Merkle trees, implemented using the RPO hash function.

+ 86
- 0
Makefile

@ -0,0 +1,86 @@
.DEFAULT_GOAL := help
.PHONY: help
help:
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
# -- variables --------------------------------------------------------------------------------------
WARNINGS=RUSTDOCFLAGS="-D warnings"
DEBUG_OVERFLOW_INFO=RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2"
# -- linting --------------------------------------------------------------------------------------
.PHONY: clippy
clippy: ## Run Clippy with configs
$(WARNINGS) cargo +nightly clippy --workspace --all-targets --all-features
.PHONY: fix
fix: ## Run Fix with configs
cargo +nightly fix --allow-staged --allow-dirty --all-targets --all-features
.PHONY: format
format: ## Run Format using nightly toolchain
cargo +nightly fmt --all
.PHONY: format-check
format-check: ## Run Format using nightly toolchain but only in check mode
cargo +nightly fmt --all --check
.PHONY: lint
lint: format fix clippy ## Run all linting tasks at once (Clippy, fixing, formatting)
# --- docs ----------------------------------------------------------------------------------------
.PHONY: doc
doc: ## Generate and check documentation
$(WARNINGS) cargo doc --all-features --keep-going --release
# --- testing -------------------------------------------------------------------------------------
.PHONY: test-default
test-default: ## Run tests with default features
$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --all-features
.PHONY: test-no-std
test-no-std: ## Run tests with `no-default-features` (no-std)
$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --no-default-features
.PHONY: test
test: test-default test-no-std ## Run all tests
# --- checking ------------------------------------------------------------------------------------
.PHONY: check
check: ## Check all targets and features for errors without code generation
cargo check --all-targets --all-features
# --- building ------------------------------------------------------------------------------------
.PHONY: build
build: ## Build with default features enabled
cargo build --release
.PHONY: build-no-std
build-no-std: ## Build without the standard library
cargo build --release --no-default-features --target wasm32-unknown-unknown
.PHONY: build-avx2
build-avx2: ## Build with avx2 support
RUSTFLAGS="-C target-feature=+avx2" cargo build --release
.PHONY: build-sve
build-sve: ## Build with sve support
RUSTFLAGS="-C target-feature=+sve" cargo build --release
# --- benchmarking --------------------------------------------------------------------------------
.PHONY: bench-tx
bench-tx: ## Run crypto benchmarks
cargo bench

+ 0
- 86
Makefile.toml

@ -1,86 +0,0 @@
# Cargo Makefile
# -- linting --------------------------------------------------------------------------------------
[tasks.format]
toolchain = "nightly"
command = "cargo"
args = ["fmt", "--all"]
[tasks.format-check]
toolchain = "nightly"
command = "cargo"
args = ["fmt", "--all", "--", "--check"]
[tasks.clippy-default]
command = "cargo"
args = ["clippy","--workspace", "--all-targets", "--", "-D", "clippy::all", "-D", "warnings"]
[tasks.clippy-all-features]
command = "cargo"
args = ["clippy","--workspace", "--all-targets", "--all-features", "--", "-D", "clippy::all", "-D", "warnings"]
[tasks.clippy]
dependencies = [
"clippy-default",
"clippy-all-features"
]
[tasks.fix]
description = "Runs Fix"
command = "cargo"
toolchain = "nightly"
args = ["fix", "--allow-staged", "--allow-dirty", "--all-targets", "--all-features"]
[tasks.lint]
description = "Runs all linting tasks (Clippy, fixing, formatting)"
run_task = { name = ["format", "format-check", "clippy", "docs"] }
# --- docs ----------------------------------------------------------------------------------------
[tasks.doc]
env = { "RUSTDOCFLAGS" = "-D warnings" }
command = "cargo"
args = ["doc", "--all-features", "--keep-going", "--release"]
# --- testing -------------------------------------------------------------------------------------
[tasks.test]
description = "Run tests with default features"
env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
workspace = false
command = "cargo"
args = ["test", "--release"]
[tasks.test-no-default-features]
description = "Run tests with no-default-features"
env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
workspace = false
command = "cargo"
args = ["test", "--release", "--no-default-features"]
[tasks.test-all]
description = "Run all tests"
workspace = false
run_task = { name = ["test", "test-no-default-features"], parallel = true }
# --- building ------------------------------------------------------------------------------------
[tasks.build]
description = "Build in release mode"
command = "cargo"
args = ["build", "--release"]
[tasks.build-no-std]
description = "Build using no-std"
command = "cargo"
args = ["build", "--release", "--no-default-features", "--target", "wasm32-unknown-unknown"]
[tasks.build-avx2]
description = "Build using AVX2 acceleration"
env = { "RUSTFLAGS" = "-C target-feature=+avx2" }
command = "cargo"
args = ["build", "--release"]
[tasks.build-sve]
description = "Build with SVE acceleration"
env = { "RUSTFLAGS" = "-C target-feature=+sve" }
command = "cargo"
args = ["build", "--release"]

+ 49
- 27
README.md

@ -2,85 +2,107 @@
[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/0xPolygonMiden/crypto/blob/main/LICENSE)
[![test](https://github.com/0xPolygonMiden/crypto/actions/workflows/test.yml/badge.svg)](https://github.com/0xPolygonMiden/crypto/actions/workflows/test.yml)
[![no-std](https://github.com/0xPolygonMiden/crypto/actions/workflows/no-std.yml/badge.svg)](https://github.com/0xPolygonMiden/crypto/actions/workflows/no-std.yml)
[![RUST_VERSION](https://img.shields.io/badge/rustc-1.80+-lightgray.svg)]()
[![build](https://github.com/0xPolygonMiden/crypto/actions/workflows/build.yml/badge.svg)](https://github.com/0xPolygonMiden/crypto/actions/workflows/build.yml)
[![RUST_VERSION](https://img.shields.io/badge/rustc-1.80+-lightgray.svg)](https://www.rust-lang.org/tools/install)
[![CRATE](https://img.shields.io/crates/v/miden-crypto)](https://crates.io/crates/miden-crypto)
This crate contains cryptographic primitives used in Polygon Miden.
## Hash
[Hash module](./src/hash) provides a set of cryptographic hash functions which are used by the Miden VM and the Miden rollup. Currently, these functions are:
* [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
* [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
* [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster as compared to RPO.
- [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
- [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
- [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster as compared to RPO.
For performance benchmarks of these hash functions and their comparison to other popular hash functions please see [here](./benches/).
## Merkle
[Merkle module](./src/merkle/) provides a set of data structures related to Merkle trees. All these data structures are implemented using the RPO hash function described above. The data structures are:
* `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
* `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
* `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
* `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
* `PartialMmr`: a partial view of a Merkle mountain range structure.
* `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
* `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.
- `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
- `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
- `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
- `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
- `PartialMmr`: a partial view of a Merkle mountain range structure.
- `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
- `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.
The module also contains additional supporting components such as `NodeIndex`, `MerklePath`, and `MerkleError` to assist with tree indexation, opening proofs, and reporting inconsistent arguments/state.
The module also contains additional supporting components such as `NodeIndex`, `MerklePath`, and `MerkleError` to assist with tree indexation, opening proofs, and reporting inconsistent arguments/state.
## Signatures
[DSA module](./src/dsa) provides a set of digital signature schemes supported by default in the Miden VM. Currently, these schemes are:
* `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using SHAKE256 hash function in the *hash-to-point* algorithm we use RPO256. This makes the signature more efficient to verify in Miden VM.
- `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using SHAKE256 hash function in the _hash-to-point_ algorithm we use RPO256. This makes the signature more efficient to verify in Miden VM.
For the above signatures, key generation, signing, and signature verification are available for both `std` and `no_std` contexts (see [crate features](#crate-features) below). However, in `no_std` context, the user is responsible for supplying the key generation and signing procedures with a random number generator.
## Pseudo-Random Element Generator
[Pseudo random element generator module](./src/rand/) provides a set of traits and data structures that facilitate generating pseudo-random elements in the context of Miden VM and Miden rollup. The module currently includes:
* `FeltRng`: a trait for generating random field elements and random 4 field elements.
* `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPO hash function.
* `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPX hash function.
- `FeltRng`: a trait for generating random field elements and random 4 field elements.
- `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPO hash function.
- `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPX hash function.
## Make commands
We use `make` to automate building, testing, and other processes. In most cases, `make` commands are wrappers around `cargo` commands with specific arguments. You can view the list of available commands in the [Makefile](Makefile), or run the following command:
```shell
make
```
## Crate features
This crate can be compiled with the following features:
* `std` - enabled by default and relies on the Rust standard library.
* `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.
- `std` - enabled by default and relies on the Rust standard library.
- `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.
Both of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.
To compile with `no_std`, disable default features via `--no-default-features` flag.
To compile with `no_std`, disable default features via `--no-default-features` flag or using the following command:
```shell
make build-no-std
```
### AVX2 acceleration
On platforms with [AVX2](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable AVX2 acceleration, the code needs to be compiled with the `avx2` target feature enabled. For example:
```shell
cargo make build-avx2
make build-avx2
```
### SVE acceleration
On platforms with [SVE](https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:
On platforms with [SVE](<https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)>) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:
```shell
cargo make build-sve
make build-sve
```
## Testing
The best way to test the library is using our `Makefile.toml` and [cargo-make](https://github.com/sagiegurari/cargo-make), this will enable you to use our pre-defined optimized testing commands:
The best way to test the library is using our [Makefile](Makefile); this will enable you to use our pre-defined optimized testing commands:
```shell
cargo make test-all
make test
```
For example, some of the functions are heavy and might take a while for the tests to complete if using simply `cargo test`. In order to test in release and optimized mode, we have to replicate the test conditions of the development mode so all debug assertions can be verified.
We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile.toml](Makefile.toml)):
We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile](Makefile)):
```shell
RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2" cargo test --release
```
## License
This project is [MIT licensed](./LICENSE).

+ 0
- 1
rust-toolchain

@ -1 +0,0 @@
1.80

+ 5
- 0
rust-toolchain.toml

@ -0,0 +1,5 @@
[toolchain]
channel = "1.80"
components = ["rustfmt", "rust-src", "clippy"]
targets = ["wasm32-unknown-unknown"]
profile = "minimal"

+ 13
- 11
rustfmt.toml

@ -2,20 +2,22 @@ edition = "2021"
array_width = 80
attr_fn_like_width = 80
chain_width = 80
#condense_wildcard_suffixes = true
#enum_discrim_align_threshold = 40
comment_width = 100
condense_wildcard_suffixes = true
fn_call_width = 80
#fn_single_line = true
#format_code_in_doc_comments = true
#format_macro_matchers = true
#format_strings = true
#group_imports = "StdExternalCrate"
#hex_literal_case = "Lower"
#imports_granularity = "Crate"
format_code_in_doc_comments = true
format_macro_matchers = true
group_imports = "StdExternalCrate"
hex_literal_case = "Lower"
imports_granularity = "Crate"
match_block_trailing_comma = true
newline_style = "Unix"
#normalize_doc_attributes = true
#reorder_impl_items = true
reorder_imports = true
reorder_modules = true
single_line_if_else_max_width = 60
single_line_let_else_max_width = 60
struct_lit_width = 40
struct_variant_width = 40
use_field_init_shorthand = true
use_try_shorthand = true
wrap_comments = true

+ 21
- 0
scripts/check-changelog.sh

@ -0,0 +1,21 @@
#!/bin/bash
set -uo pipefail

# Fails a PR unless the changelog file was modified relative to the base
# branch, or the "no changelog" label has been applied to the PR.
#
# Usage: check-changelog.sh [CHANGELOG_FILE]   (defaults to CHANGELOG.md)
#
# Environment:
#   NO_CHANGELOG_LABEL - "true" when the "no changelog" label is set on the PR.
#   BASE_REF           - name of the PR's base branch (e.g. "main").

CHANGELOG_FILE="${1:-CHANGELOG.md}"

# Default to empty so `set -u` does not abort when the CI variable is absent;
# an absent label means a changelog entry is required.
if [ "${NO_CHANGELOG_LABEL:-}" = "true" ]; then
    # 'no changelog' set, so finish successfully
    echo "\"no changelog\" label has been set"
    exit 0
else
    # a changelog check is required:
    # `git diff --exit-code` returns 0 when the diff is empty, i.e. the
    # changelog was NOT touched — that is the failure case here.
    if git diff --exit-code "origin/${BASE_REF:?BASE_REF must be set}" -- "${CHANGELOG_FILE}"; then
        >&2 echo "Changes should come with an entry in the \"CHANGELOG.md\" file. This behavior
can be overridden by using the \"no changelog\" label, which is used for changes
that are trivial / explicitly stated not to require a changelog entry."
        exit 1
    fi
    echo "The \"CHANGELOG.md\" file has been updated."
fi

+ 6
- 4
scripts/check-rust-version.sh

@ -1,10 +1,12 @@
#!/bin/bash
# Check rust-toolchain file
TOOLCHAIN_VERSION=$(cat rust-toolchain)
# Get rust-toolchain.toml file channel
TOOLCHAIN_VERSION=$(grep 'channel' rust-toolchain.toml | sed -E 's/.*"(.*)".*/\1/')
# Check workspace Cargo.toml file
CARGO_VERSION=$(cat Cargo.toml | grep "rust-version" | cut -d '"' -f 2)
# Get workspace Cargo.toml file rust-version
CARGO_VERSION=$(grep 'rust-version' Cargo.toml | sed -E 's/.*"(.*)".*/\1/')
# Check version match
if [ "$CARGO_VERSION" != "$TOOLCHAIN_VERSION" ]; then
echo "Mismatch in Cargo.toml: Expected $TOOLCHAIN_VERSION, found $CARGO_VERSION"
exit 1

+ 3
- 1
src/dsa/rpo_falcon512/hash_to_point.rs

@ -1,7 +1,9 @@
use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};
use alloc::vec::Vec;
use num::Zero;
use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};
// HASH-TO-POINT FUNCTIONS
// ================================================================================================

+ 2
- 1
src/dsa/rpo_falcon512/keys/mod.rs

@ -15,12 +15,13 @@ pub use secret_key::SecretKey;
#[cfg(test)]
mod tests {
use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use winter_math::FieldElement;
use winter_utils::{Deserializable, Serializable};
use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};
#[test]
fn test_falcon_verification() {
let seed = [0_u8; 32];

+ 6
- 5
src/dsa/rpo_falcon512/keys/public_key.rs

@ -1,13 +1,14 @@
use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;
use alloc::string::ToString;
use core::ops::Deref;
use num::Zero;
use super::{
super::{Rpo256, LOG_N, N, PK_LEN},
ByteReader, ByteWriter, Deserializable, DeserializationError, FalconFelt, Felt, Polynomial,
Serializable, Signature, Word,
};
use alloc::string::ToString;
use core::ops::Deref;
use num::Zero;
use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;
// PUBLIC KEY
// ================================================================================================
@ -116,7 +117,7 @@ impl Deserializable for PubKeyPoly {
if acc_len >= FALCON_ENCODING_BITS {
acc_len -= FALCON_ENCODING_BITS;
let w = (acc >> acc_len) & 0x3FFF;
let w = (acc >> acc_len) & 0x3fff;
let element = w.try_into().map_err(|err| {
DeserializationError::InvalidValue(format!(
"Failed to decode public key: {err}"

+ 8
- 7
src/dsa/rpo_falcon512/keys/secret_key.rs

@ -1,3 +1,11 @@
use alloc::{string::ToString, vec::Vec};
use num::Complex;
#[cfg(not(feature = "std"))]
use num::Float;
use num_complex::Complex64;
use rand::Rng;
use super::{
super::{
math::{ffldl, ffsampling, gram, normalize_tree, FalconFelt, FastFft, LdlTree, Polynomial},
@ -10,13 +18,6 @@ use super::{
use crate::dsa::rpo_falcon512::{
hash_to_point::hash_to_point_rpo256, math::ntru_gen, SIG_NONCE_LEN, SK_LEN,
};
use alloc::{string::ToString, vec::Vec};
use num::Complex;
use num_complex::Complex64;
use rand::Rng;
#[cfg(not(feature = "std"))]
use num::Float;
// CONSTANTS
// ================================================================================================

+ 8
- 7
src/dsa/rpo_falcon512/math/ffsampling.rs

@ -1,11 +1,12 @@
use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
use alloc::boxed::Box;
#[cfg(not(feature = "std"))]
use num::Float;
use num::{One, Zero};
use num_complex::{Complex, Complex64};
use rand::Rng;
#[cfg(not(feature = "std"))]
use num::Float;
use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
const SIGMIN: f64 = 1.2778336969128337;
@ -80,11 +81,11 @@ pub fn normalize_tree(tree: &mut LdlTree, sigma: f64) {
LdlTree::Branch(_ell, left, right) => {
normalize_tree(left, sigma);
normalize_tree(right, sigma);
}
},
LdlTree::Leaf(vector) => {
vector[0] = Complex::new(sigma / vector[0].re.sqrt(), 0.0);
vector[1] = Complex64::zero();
}
},
}
}
@ -110,7 +111,7 @@ pub fn ffsampling(
let z0 = Polynomial::<Complex64>::merge_fft(&bold_z0.0, &bold_z0.1);
(z0, z1)
}
},
LdlTree::Leaf(value) => {
let z0 = sampler_z(t.0.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
let z1 = sampler_z(t.1.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
@ -118,6 +119,6 @@ pub fn ffsampling(
Polynomial::new(vec![Complex64::new(z0 as f64, 0.0)]),
Polynomial::new(vec![Complex64::new(z1 as f64, 0.0)]),
)
}
},
}
}

+ 21
- 29
src/dsa/rpo_falcon512/math/fft.rs

@ -1,14 +1,15 @@
use super::{field::FalconFelt, polynomial::Polynomial, Inverse};
use alloc::vec::Vec;
use core::{
f64::consts::PI,
ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};
use num::{One, Zero};
use num_complex::Complex64;
#[cfg(not(feature = "std"))]
use num::Float;
use num::{One, Zero};
use num_complex::Complex64;
use super::{field::FalconFelt, polynomial::Polynomial, Inverse};
/// Implements Cyclotomic FFT without bitreversing the outputs, and using precomputed powers of the
/// 2n-th primitive root of unity.
@ -102,7 +103,8 @@ where
array
}
/// Reorders the given elements in the array by reversing the binary expansions of their indices.
/// Reorders the given elements in the array by reversing the binary expansions of their
/// indices.
fn bitreverse_array<T>(array: &mut [T]) {
let n = array.len();
for i in 0..n {
@ -118,19 +120,14 @@ where
///
/// Arguments:
///
/// - a : &mut [Self]
/// (a reference to) a mutable array of field elements which is to
/// be transformed under the FFT. The transformation happens in-
/// place.
/// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
/// transformed under the FFT. The transformation happens in-place.
///
/// - psi_rev: &[Self]
/// (a reference to) an array of powers of psi, from 0 to n-1,
/// but ordered by bit-reversed index. Here psi is a primitive root
/// of order 2n. You can use
/// `Self::bitreversed_powers(psi, n)` for this purpose, but this
/// trait implementation is not const. For the performance benefit
/// you want a precompiled array, which you can get if you can get
/// by implementing the same method and marking it "const".
/// - psi_rev: &[Self] (a reference to) an array of powers of psi, from 0 to n-1, but ordered
/// by bit-reversed index. Here psi is a primitive root of order 2n. You can use
/// `Self::bitreversed_powers(psi, n)` for this purpose, but this trait implementation is not
/// const. For the performance benefit you want a precompiled array, which you can get by
/// implementing the same method and marking it "const".
fn fft(a: &mut [Self], psi_rev: &[Self]) {
let n = a.len();
let mut t = n;
@ -158,20 +155,15 @@ where
///
/// Arguments:
///
/// - a : &mut [Self]
/// (a reference to) a mutable array of field elements which is to
/// be transformed under the IFFT. The transformation happens in-
/// place.
/// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
/// transformed under the IFFT. The transformation happens in-place.
///
/// - psi_inv_rev: &[Self]
/// (a reference to) an array of powers of psi^-1, from 0 to n-1,
/// but ordered by bit-reversed index. Here psi is a primitive root of
/// order 2n. You can use
/// `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for
/// this purpose, but this trait implementation is not const. For
/// the performance benefit you want a precompiled array, which you
/// can get if you can get by implementing the same methods and marking
/// them "const".
/// - psi_inv_rev: &[Self] (a reference to) an array of powers of psi^-1, from 0 to n-1, but
/// ordered by bit-reversed index. Here psi is a primitive root of order 2n. You can use
/// `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for this purpose, but this
/// trait implementation is not const. For the performance benefit you want a precompiled
/// array, which you can get by implementing the same methods and marking them
/// "const".
fn ifft(a: &mut [Self], psi_inv_rev: &[Self], ninv: Self) {
let n = a.len();
let mut t = 1;

+ 3
- 1
src/dsa/rpo_falcon512/math/field.rs

@ -1,8 +1,10 @@
use super::{fft::CyclotomicFourier, Inverse, MODULUS};
use alloc::string::String;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use num::{One, Zero};
use super::{fft::CyclotomicFourier, Inverse, MODULUS};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct FalconFelt(u32);

+ 7
- 5
src/dsa/rpo_falcon512/math/mod.rs

@ -2,17 +2,19 @@
//!
//! It uses and acknowledges the work in:
//!
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas Pornin.
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas
//! Pornin.
//! 2. The [Rust](https://github.com/aszepieniec/falcon-rust) implementation by Alan Szepieniec.
use super::MODULUS;
use alloc::{string::String, vec::Vec};
use core::ops::MulAssign;
#[cfg(not(feature = "std"))]
use num::Float;
use num::{BigInt, FromPrimitive, One, Zero};
use num_complex::Complex64;
use rand::Rng;
#[cfg(not(feature = "std"))]
use num::Float;
use super::MODULUS;
mod fft;
pub use fft::{CyclotomicFourier, FastFft};
@ -152,7 +154,7 @@ fn ntru_solve(
{
None
}
}
},
}
}

+ 14
- 8
src/dsa/rpo_falcon512/math/polynomial.rs

@ -1,12 +1,18 @@
use super::{field::FalconFelt, Inverse};
use crate::dsa::rpo_falcon512::{MODULUS, N};
use crate::Felt;
use alloc::vec::Vec;
use core::default::Default;
use core::fmt::Debug;
use core::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign};
use core::{
default::Default,
fmt::Debug,
ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};
use num::{One, Zero};
use super::{field::FalconFelt, Inverse};
use crate::{
dsa::rpo_falcon512::{MODULUS, N},
Felt,
};
#[derive(Debug, Clone, Default)]
pub struct Polynomial<F> {
pub coefficients: Vec<F>,
@ -134,8 +140,8 @@ impl<
Self::new(coefficients)
}
/// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 > ,
/// which corresponds to f(x^2).
/// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 >
/// , which corresponds to f(x^2).
pub fn galois_adjoint(&self) -> Self {
Self::new(
self.coefficients

+ 15
- 14
src/dsa/rpo_falcon512/math/samplerz.rs

@ -1,8 +1,8 @@
use core::f64::consts::LN_2;
use rand::Rng;
#[cfg(not(feature = "std"))]
use num::Float;
use rand::Rng;
/// Samples an integer from {0, ..., 18} according to the distribution χ, which is close to
/// the half-Gaussian distribution on the natural numbers with mean 0 and standard deviation
@ -40,18 +40,18 @@ fn approx_exp(x: f64, ccs: f64) -> u64 {
// https://eprint.iacr.org/2018/1234
// https://github.com/raykzhao/gaussian
const C: [u64; 13] = [
0x00000004741183A3u64,
0x00000036548CFC06u64,
0x0000024FDCBF140Au64,
0x0000171D939DE045u64,
0x0000D00CF58F6F84u64,
0x000680681CF796E3u64,
0x002D82D8305B0FEAu64,
0x011111110E066FD0u64,
0x0555555555070F00u64,
0x155555555581FF00u64,
0x400000000002B400u64,
0x7FFFFFFFFFFF4800u64,
0x00000004741183a3u64,
0x00000036548cfc06u64,
0x0000024fdcbf140au64,
0x0000171d939de045u64,
0x0000d00cf58f6f84u64,
0x000680681cf796e3u64,
0x002d82d8305b0feau64,
0x011111110e066fd0u64,
0x0555555555070f00u64,
0x155555555581ff00u64,
0x400000000002b400u64,
0x7fffffffffff4800u64,
0x8000000000000000u64,
];
@ -116,9 +116,10 @@ pub(crate) fn sampler_z(mu: f64, sigma: f64, sigma_min: f64, rng: &mut R
#[cfg(all(test, feature = "std"))]
mod test {
use alloc::vec::Vec;
use rand::RngCore;
use std::{thread::sleep, time::Duration};
use rand::RngCore;
use super::{approx_exp, ber_exp, sampler_z};
/// RNG used only for testing purposes, whereby the produced

+ 5
- 3
src/dsa/rpo_falcon512/mod.rs

@ -9,9 +9,11 @@ mod keys;
mod math;
mod signature;
pub use self::keys::{PubKeyPoly, PublicKey, SecretKey};
pub use self::math::Polynomial;
pub use self::signature::{Signature, SignatureHeader, SignaturePoly};
pub use self::{
keys::{PubKeyPoly, PublicKey, SecretKey},
math::Polynomial,
signature::{Signature, SignatureHeader, SignaturePoly},
};
// CONSTANTS
// ================================================================================================

+ 6
- 4
src/dsa/rpo_falcon512/signature.rs

@ -1,6 +1,8 @@
use alloc::{string::ToString, vec::Vec};
use core::ops::Deref;
use num::Zero;
use super::{
hash_to_point::hash_to_point_rpo256,
keys::PubKeyPoly,
@ -8,7 +10,6 @@ use super::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Felt, Nonce, Rpo256,
Serializable, Word, LOG_N, MODULUS, N, SIG_L2_BOUND, SIG_POLY_BYTE_LEN,
};
use num::Zero;
// FALCON SIGNATURE
// ================================================================================================
@ -38,8 +39,8 @@ use num::Zero;
/// The signature is serialized as:
/// 1. A header byte specifying the algorithm used to encode the coefficients of the `s2` polynomial
/// together with the degree of the irreducible polynomial phi. For RPO Falcon512, the header
/// byte is set to `10111001` which differentiates it from the standardized instantiation of
/// the Falcon signature.
/// byte is set to `10111001` which differentiates it from the standardized instantiation of the
/// Falcon signature.
/// 2. 40 bytes for the nonce.
/// 4. 625 bytes encoding the `s2` polynomial above.
///
@ -355,10 +356,11 @@ fn are_coefficients_valid(x: &[i16]) -> bool {
#[cfg(test)]
mod tests {
use super::{super::SecretKey, *};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use super::{super::SecretKey, *};
#[test]
fn test_serialization_round_trip() {
let seed = [0_u8; 32];

+ 2
- 1
src/hash/blake/tests.rs

@ -1,8 +1,9 @@
use alloc::vec::Vec;
use proptest::prelude::*;
use rand_utils::rand_vector;
use super::*;
use alloc::vec::Vec;
#[test]
fn blake3_hash_elements() {

+ 36
- 33
src/hash/rescue/arch/x86_64_avx2.rs

@ -4,40 +4,43 @@ use core::arch::x86_64::*;
// https://github.com/0xPolygonZero/plonky2/blob/main/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs
// Preliminary notes:
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily
// emulated. The method recognizes that for a + b overflowed iff (a + b) < a:
// i. res_lo = a_lo + b_lo
// ii. carry_mask = res_lo < a_lo
// iii. res_hi = a_hi + b_hi - carry_mask
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily emulated.
// The method recognizes that for a + b overflowed iff (a + b) < a:
// 1. res_lo = a_lo + b_lo
// 2. carry_mask = res_lo < a_lo
// 3. res_hi = a_hi + b_hi - carry_mask
//
// Notice that carry_mask is subtracted, not added. This is because AVX comparison instructions
// return -1 (all bits 1) for true and 0 for false.
//
// 2. AVX does not have unsigned 64-bit comparisons. Those can be emulated with signed comparisons
// by recognizing that a <u b iff a + (1 << 63) <s b + (1 << 63), where the addition wraps around
// and the comparisons are unsigned and signed respectively. The shift function adds/subtracts
// 1 << 63 to enable this trick.
// Example: addition with carry.
// i. a_lo_s = shift(a_lo)
// ii. res_lo_s = a_lo_s + b_lo
// iii. carry_mask = res_lo_s <s a_lo_s
// iv. res_lo = shift(res_lo_s)
// v. res_hi = a_hi + b_hi - carry_mask
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition is
// shifted if exactly one of the operands is shifted, as is the case on line ii. Line iii.
// performs a signed comparison res_lo_s <s a_lo_s on shifted values to emulate unsigned
// comparison res_lo <u a_lo on unshifted values. Finally, line iv. reverses the shift so the
// result can be returned.
// When performing a chain of calculations, we can often save instructions by letting the shift
// propagate through and only undoing it when necessary. For example, to compute the addition of
// three two-word (128-bit) numbers we can do:
// i. a_lo_s = shift(a_lo)
// ii. tmp_lo_s = a_lo_s + b_lo
// iii. tmp_carry_mask = tmp_lo_s <s a_lo_s
// iv. tmp_hi = a_hi + b_hi - tmp_carry_mask
// v. res_lo_s = tmp_lo_s + c_lo
// vi. res_carry_mask = res_lo_s <s tmp_lo_s
// vii. res_lo = shift(res_lo_s)
// viii. res_hi = tmp_hi + c_hi - res_carry_mask
// and the comparisons are unsigned and signed respectively. The shift function adds/subtracts 1
// << 63 to enable this trick. Addition with carry example:
// 1. a_lo_s = shift(a_lo)
// 2. res_lo_s = a_lo_s + b_lo
// 3. carry_mask = res_lo_s <s a_lo_s
// 4. res_lo = shift(res_lo_s)
// 5. res_hi = a_hi + b_hi - carry_mask
//
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition
// is shifted if exactly one of the operands is shifted, as is the case on
// line 2. Line 3. performs a signed comparison res_lo_s <s a_lo_s on shifted values to
// emulate unsigned comparison res_lo <u a_lo on unshifted values. Finally, line 4. reverses the
// shift so the result can be returned.
//
// When performing a chain of calculations, we can often save instructions by letting
// the shift propagate through and only undoing it when necessary.
// For example, to compute the addition of three two-word (128-bit) numbers we can do:
// 1. a_lo_s = shift(a_lo)
// 2. tmp_lo_s = a_lo_s + b_lo
// 3. tmp_carry_mask = tmp_lo_s <s a_lo_s
// 4. tmp_hi = a_hi + b_hi - tmp_carry_mask
// 5. res_lo_s = tmp_lo_s + c_lo
// 6. res_carry_mask = res_lo_s <s tmp_lo_s
// 7. res_lo = shift(res_lo_s)
// 8. res_hi = tmp_hi + c_hi - res_carry_mask
//
// Notice that the above 3-value addition still only requires two calls to shift, just like our
// 2-value addition.
@ -60,10 +63,10 @@ pub fn branch_hint() {
}
macro_rules! map3 {
($f:ident::<$l:literal>, $v:ident) => {
($f:ident:: < $l:literal > , $v:ident) => {
($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2))
};
($f:ident::<$l:literal>, $v1:ident, $v2:ident) => {
($f:ident:: < $l:literal > , $v1:ident, $v2:ident) => {
($f::<$l>($v1.0, $v2.0), $f::<$l>($v1.1, $v2.1), $f::<$l>($v1.2, $v2.2))
};
($f:ident, $v:ident) => {
@ -72,11 +75,11 @@ macro_rules! map3 {
($f:ident, $v0:ident, $v1:ident) => {
($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2))
};
($f:ident, rep $v0:ident, $v1:ident) => {
($f:ident,rep $v0:ident, $v1:ident) => {
($f($v0, $v1.0), $f($v0, $v1.1), $f($v0, $v1.2))
};
($f:ident, $v0:ident, rep $v1:ident) => {
($f:ident, $v0:ident,rep $v1:ident) => {
($f($v0.0, $v1), $f($v0.1, $v1), $f($v0.2, $v1))
};
}

+ 6
- 5
src/hash/rescue/mds/freq.rs

@ -7,9 +7,9 @@
/// of two vectors in "frequency domain". This follows from the simple fact that every circulant
/// matrix has the columns of the discrete Fourier transform matrix as orthogonal eigenvectors.
/// The implementation also avoids the use of 3-point FFTs, and 3-point iFFTs, and substitutes that
/// with explicit expressions. It also avoids, due to the form of our matrix in the frequency domain,
/// divisions by 2 and repeated modular reductions. This is because of our explicit choice of
/// an MDS matrix that has small powers of 2 entries in frequency domain.
/// with explicit expressions. It also avoids, due to the form of our matrix in the frequency
/// domain, divisions by 2 and repeated modular reductions. This is because of our explicit choice
/// of an MDS matrix that has small powers of 2 entries in frequency domain.
/// The following implementation has benefited greatly from the discussions and insights of
/// Hamish Ivey-Law and Jacqueline Nabaglo of Polygon Zero and is base on Nabaglo's Plonky2
/// implementation.
@ -19,8 +19,9 @@
// the MDS matrix i.e. just before the multiplication with the appropriate twiddle factors
// and application of the final four 3-point FFT in order to get the full 12-point FFT.
// The entries have been scaled appropriately in order to avoid divisions by 2 in iFFT2 and iFFT4.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to generate
// MDS matrices efficiently in original domain, that was developed by the Polygon Zero team.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to
// generate MDS matrices efficiently in original domain, that was developed by the Polygon Zero
// team.
const MDS_FREQ_BLOCK_ONE: [i64; 3] = [16, 8, 16];
const MDS_FREQ_BLOCK_TWO: [(i64, i64); 3] = [(-1, 2), (-1, 1), (4, 8)];
const MDS_FREQ_BLOCK_THREE: [i64; 3] = [-8, 1, 1];

+ 1
- 0
src/hash/rescue/rpo/digest.rs

@ -459,6 +459,7 @@ impl IntoIterator for RpoDigest {
#[cfg(test)]
mod tests {
use alloc::string::String;
use rand_utils::rand_value;
use super::{Deserializable, Felt, RpoDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};

+ 4
- 5
src/hash/rescue/rpo/mod.rs

@ -152,11 +152,10 @@ impl Hasher for Rpo256 {
fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
// initialize the state as follows:
// - seed is copied into the first 4 elements of the rate portion of the state.
// - if the value fits into a single field element, copy it into the fifth rate element
// and set the sixth rate element to 1.
// - if the value doesn't fit into a single field element, split it into two field
// elements, copy them into rate elements 5 and 6, and set the seventh rate element
// to 1.
// - if the value fits into a single field element, copy it into the fifth rate element and
// set the sixth rate element to 1.
// - if the value doesn't fit into a single field element, split it into two field elements,
// copy them into rate elements 5 and 6, and set the seventh rate element to 1.
// - set the first capacity element to 1
let mut state = [ZERO; STATE_WIDTH];
state[INPUT1_RANGE].copy_from_slice(seed.as_elements());

+ 4
- 3
src/hash/rescue/rpo/tests.rs

@ -1,3 +1,5 @@
use alloc::{collections::BTreeSet, vec::Vec};
use proptest::prelude::*;
use rand_utils::rand_value;
@ -6,7 +8,6 @@ use super::{
Felt, FieldElement, Hasher, Rpo256, RpoDigest, StarkField, ONE, STATE_WIDTH, ZERO,
};
use crate::Word;
use alloc::{collections::BTreeSet, vec::Vec};
#[test]
fn test_sbox() {
@ -58,7 +59,7 @@ fn merge_vs_merge_in_domain() {
];
let merge_result = Rpo256::merge(&digests);
// ------------- merge with domain = 0 ----------------------------------------------------------
// ------------- merge with domain = 0 -------------
// set domain to ZERO. This should not change the result.
let domain = ZERO;
@ -66,7 +67,7 @@ fn merge_vs_merge_in_domain() {
let merge_in_domain_result = Rpo256::merge_in_domain(&digests, domain);
assert_eq!(merge_result, merge_in_domain_result);
// ------------- merge with domain = 1 ----------------------------------------------------------
// ------------- merge with domain = 1 -------------
// set domain to ONE. This should change the result.
let domain = ONE;

+ 1
- 0
src/hash/rescue/rpx/digest.rs

@ -459,6 +459,7 @@ impl IntoIterator for RpxDigest {
#[cfg(test)]
mod tests {
use alloc::string::String;
use rand_utils::rand_value;
use super::{Deserializable, Felt, RpxDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};

+ 6
- 4
src/hash/rescue/rpx/mod.rs

@ -26,8 +26,10 @@ pub type CubicExtElement = CubeExtension;
/// * Capacity size: 4 field elements.
/// * S-Box degree: 7.
/// * Rounds: There are 3 different types of rounds:
/// - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` → `apply_inv_sbox`.
/// - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension field).
/// - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` →
/// `apply_inv_sbox`.
/// - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension
/// field).
/// - (M): `apply_mds` → `add_constants`.
/// * Permutation: (FB) (E) (FB) (E) (FB) (E) (M).
///
@ -158,8 +160,8 @@ impl Hasher for Rpx256 {
// - seed is copied into the first 4 elements of the rate portion of the state.
// - if the value fits into a single field element, copy it into the fifth rate element and
// set the first capacity element to 5.
// - if the value doesn't fit into a single field element, split it into two field
// elements, copy them into rate elements 5 and 6 and set the first capacity element to 6.
// - if the value doesn't fit into a single field element, split it into two field elements,
// copy them into rate elements 5 and 6 and set the first capacity element to 6.
let mut state = [ZERO; STATE_WIDTH];
state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
state[INPUT2_RANGE.start] = Felt::new(value);

+ 5
- 5
src/merkle/error.rs

@ -33,22 +33,22 @@ impl fmt::Display for MerkleError {
DuplicateValuesForKey(key) => write!(f, "multiple values provided for key {key}"),
InvalidIndex { depth, value } => {
write!(f, "the index value {value} is not valid for the depth {depth}")
}
},
InvalidDepth { expected, provided } => {
write!(f, "the provided depth {provided} is not valid for {expected}")
}
},
InvalidSubtreeDepth { subtree_depth, tree_depth } => {
write!(f, "tried inserting a subtree of depth {subtree_depth} into a tree of depth {tree_depth}")
}
},
InvalidPath(_path) => write!(f, "the provided path is not valid"),
InvalidNumEntries(max) => write!(f, "number of entries exceeded the maximum: {max}"),
NodeNotInSet(index) => write!(f, "the node with index ({index}) is not in the set"),
NodeNotInStore(hash, index) => {
write!(f, "the node {hash:?} with index ({index}) is not in the store")
}
},
NumLeavesNotPowerOfTwo(leaves) => {
write!(f, "the leaves count {leaves} is not a power of 2")
}
},
RootNotInStore(root) => write!(f, "the root {:?} is not in the store", root),
SmtLeaf(smt_leaf_error) => write!(f, "smt leaf error: {smt_leaf_error}"),
}

+ 2
- 1
src/merkle/mmr/delta.rs

@ -1,6 +1,7 @@
use super::super::RpoDigest;
use alloc::vec::Vec;
use super::super::RpoDigest;
/// Container for the update data of a [super::PartialMmr]
#[derive(Debug)]
pub struct MmrDelta {

+ 2
- 2
src/merkle/mmr/error.rs

@ -21,11 +21,11 @@ impl Display for MmrError {
MmrError::InvalidPeaks => write!(fmt, "Invalid peaks count"),
MmrError::InvalidPeak => {
write!(fmt, "Peak values does not match merkle path computed root")
}
},
MmrError::InvalidUpdate => write!(fmt, "Invalid mmr update"),
MmrError::UnknownPeak => {
write!(fmt, "Peak not in Mmr")
}
},
MmrError::MerkleError(err) => write!(fmt, "{}", err),
}
}

+ 4
- 2
src/merkle/mmr/full.rs

@ -10,13 +10,14 @@
//! depths, i.e. as part of adding a new element to the forest the trees with same depth are
//! merged, creating a new tree with depth d+1, this process is continued until the property is
//! reestablished.
use alloc::vec::Vec;
use super::{
super::{InnerNodeInfo, MerklePath},
bit::TrueBitPositionIterator,
leaf_to_corresponding_tree, nodes_in_forest, MmrDelta, MmrError, MmrPeaks, MmrProof, Rpo256,
RpoDigest,
};
use alloc::vec::Vec;
// MMR
// ===============================================================================================
@ -402,7 +403,8 @@ impl<'a> Iterator for MmrNodes<'a> {
// the next parent position is one above the position of the pair
let parent = self.last_right << 1;
// the left node has been paired and the current parent yielded, removed it from the forest
// the left node has been paired and the current parent yielded, so remove it from the
// forest
self.forest ^= self.last_right;
if self.forest & parent == 0 {
// this iteration yielded the left parent node

+ 4
- 4
src/merkle/mmr/mod.rs

@ -10,8 +10,6 @@ mod proof;
#[cfg(test)]
mod tests;
use super::{Felt, Rpo256, RpoDigest, Word};
// REEXPORTS
// ================================================================================================
pub use delta::MmrDelta;
@ -22,6 +20,8 @@ pub use partial::PartialMmr;
pub use peaks::MmrPeaks;
pub use proof::MmrProof;
use super::{Felt, Rpo256, RpoDigest, Word};
// UTILITIES
// ===============================================================================================
@ -42,8 +42,8 @@ const fn leaf_to_corresponding_tree(pos: usize, forest: usize) -> Option {
// - this means the first tree owns from `0` up to the `2^k_0` first positions, where `k_0`
// is the highest true bit position, the second tree from `2^k_0 + 1` up to `2^k_1` where
// `k_1` is the second highest bit, so on.
// - this means the highest bits work as a category marker, and the position is owned by
// the first tree which doesn't share a high bit with the position
// - this means the highest bits work as a category marker, and the position is owned by the
// first tree which doesn't share a high bit with the position
let before = forest & pos;
let after = forest ^ before;
let tree = after.ilog2();

+ 7
- 5
src/merkle/mmr/partial.rs

@ -1,12 +1,13 @@
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use super::{MmrDelta, MmrProof, Rpo256, RpoDigest};
use crate::merkle::{
mmr::{leaf_to_corresponding_tree, nodes_in_forest},
InOrderIndex, InnerNodeInfo, MerklePath, MmrError, MmrPeaks,
};
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
// TYPE ALIASES
// ================================================================================================
@ -613,12 +614,13 @@ fn forest_to_rightmost_index(forest: usize) -> InOrderIndex {
#[cfg(test)]
mod tests {
use alloc::{collections::BTreeSet, vec::Vec};
use super::{
forest_to_rightmost_index, forest_to_root_index, InOrderIndex, MmrPeaks, PartialMmr,
RpoDigest,
};
use crate::merkle::{int_to_node, MerkleStore, Mmr, NodeIndex};
use alloc::{collections::BTreeSet, vec::Vec};
const LEAVES: [RpoDigest; 7] = [
int_to_node(0),

+ 11
- 11
src/merkle/mmr/peaks.rs

@ -1,6 +1,7 @@
use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};
use alloc::vec::Vec;
use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};
// MMR PEAKS
// ================================================================================================
@ -18,12 +19,12 @@ pub struct MmrPeaks {
///
/// Examples:
///
/// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number
/// of peaks, in this case there are 2 peaks. The 0-indexed least-significant position of
/// the bit determines the number of elements of a tree, so the rightmost tree has `2**0`
/// elements and the left most has `2**2`.
/// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the
/// leftmost tree has `2**3=8` elements, and the right most has `2**2=4` elements.
/// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number of peaks,
/// in this case there are 2 peaks. The 0-indexed least-significant position of the bit
/// determines the number of elements of a tree, so the rightmost tree has `2**0` elements
/// and the left most has `2**2`.
/// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the leftmost tree has
/// `2**3=8` elements, and the right most has `2**2=4` elements.
num_leaves: usize,
/// All the peaks of every tree in the MMR forest. The peaks are always ordered by number of
@ -94,16 +95,15 @@ impl MmrPeaks {
/// - Flatten the vector of Words into a vector of Felts.
/// - Pad the peaks with ZERO to an even number of words, this removes the need to handle RPO
/// padding.
/// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of
/// hashing.
/// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of hashing.
pub fn flatten_and_pad_peaks(&self) -> Vec<Felt> {
let num_peaks = self.peaks.len();
// To achieve the padding rules above we calculate the length of the final vector.
// This is calculated as the number of field elements. Each peak is 4 field elements.
// The length is calculated as follows:
// - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires
// 64 field elements.
// - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires 64
// field elements.
// - If there are more than 16 peaks and the number of peaks is odd, the data is padded to
// an even number of peaks and as such requires `(num_peaks + 1) * 4` field elements.
// - If there are more than 16 peaks and the number of peaks is even, the data is not padded

+ 2
- 1
src/merkle/mmr/tests.rs

@ -1,3 +1,5 @@
use alloc::vec::Vec;
use super::{
super::{InnerNodeInfo, Rpo256, RpoDigest},
bit::TrueBitPositionIterator,
@ -8,7 +10,6 @@ use crate::{
merkle::{int_to_node, InOrderIndex, MerklePath, MerkleTree, MmrProof, NodeIndex},
Felt, Word,
};
use alloc::vec::Vec;
#[test]
fn test_position_equal_or_higher_than_leafs_is_never_contained() {

+ 4
- 2
src/merkle/partial_mt/tests.rs

@ -1,3 +1,5 @@
use alloc::{collections::BTreeMap, vec::Vec};
use super::{
super::{
digests_to_words, int_to_node, DefaultMerkleStore as MerkleStore, MerkleTree, NodeIndex,
@ -5,7 +7,6 @@ use super::{
},
Deserializable, InnerNodeInfo, RpoDigest, Serializable, ValuePath,
};
use alloc::{collections::BTreeMap, vec::Vec};
// TEST DATA
// ================================================================================================
@ -294,7 +295,8 @@ fn leaves() {
assert!(expected_leaves.eq(pmt.leaves()));
}
/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected ones.
/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected
/// ones.
#[test]
fn test_inner_node_iterator() {
let mt = MerkleTree::new(digests_to_words(&VALUES8)).unwrap();

+ 5
- 5
src/merkle/smt/full/error.rs

@ -37,17 +37,17 @@ impl fmt::Display for SmtLeafError {
match self {
InvalidNumEntriesForMultiple(num_entries) => {
write!(f, "Multiple leaf requires 2 or more entries. Got: {num_entries}")
}
},
InconsistentKeys { entries, key_1, key_2 } => {
write!(f, "Multiple leaf requires all keys to map to the same leaf index. Offending keys: {key_1} and {key_2}. Entries: {entries:?}.")
}
},
SingleKeyInconsistentWithLeafIndex { key, leaf_index } => {
write!(
f,
"Single key in leaf inconsistent with leaf index. Key: {key}, leaf index: {}",
leaf_index.value()
)
}
},
MultipleKeysInconsistentWithLeafIndex {
leaf_index_from_keys,
leaf_index_supplied,
@ -58,7 +58,7 @@ impl fmt::Display for SmtLeafError {
leaf_index_from_keys.value(),
leaf_index_supplied.value()
)
}
},
}
}
}
@ -80,7 +80,7 @@ impl fmt::Display for SmtProofError {
match self {
InvalidPathLength(path_length) => {
write!(f, "Invalid Merkle path length. Expected {SMT_DEPTH}, got {path_length}")
}
},
}
}
}

+ 20
- 19
src/merkle/smt/full/leaf.rs

@ -20,8 +20,8 @@ impl SmtLeaf {
///
/// # Errors
/// - Returns an error if 2 keys in `entries` map to a different leaf index
/// - Returns an error if 1 or more keys in `entries` map to a leaf index
/// different from `leaf_index`
/// - Returns an error if 1 or more keys in `entries` map to a leaf index different from
/// `leaf_index`
pub fn new(
entries: Vec<(RpoDigest, Word)>,
leaf_index: LeafIndex<SMT_DEPTH>,
@ -39,7 +39,7 @@ impl SmtLeaf {
}
Ok(Self::new_single(key, value))
}
},
_ => {
let leaf = Self::new_multiple(entries)?;
@ -53,7 +53,7 @@ impl SmtLeaf {
} else {
Ok(leaf)
}
}
},
}
}
@ -118,7 +118,7 @@ impl SmtLeaf {
// Note: All keys are guaranteed to have the same leaf index
let (first_key, _) = entries[0];
first_key.into()
}
},
}
}
@ -129,7 +129,7 @@ impl SmtLeaf {
SmtLeaf::Single(_) => 1,
SmtLeaf::Multiple(entries) => {
entries.len().try_into().expect("shouldn't have more than 2^64 entries")
}
},
}
}
@ -141,7 +141,7 @@ impl SmtLeaf {
SmtLeaf::Multiple(kvs) => {
let elements: Vec<Felt> = kvs.iter().copied().flat_map(kv_to_elements).collect();
Rpo256::hash_elements(&elements)
}
},
}
}
@ -182,7 +182,8 @@ impl SmtLeaf {
// HELPERS
// ---------------------------------------------------------------------------------------------
/// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another leaf.
/// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another
/// leaf.
pub(super) fn get_value(&self, key: &RpoDigest) -> Option<Word> {
// Ensure that `key` maps to this leaf
if self.index() != key.into() {
@ -197,7 +198,7 @@ impl SmtLeaf {
} else {
Some(EMPTY_WORD)
}
}
},
SmtLeaf::Multiple(kv_pairs) => {
for (key_in_leaf, value_in_leaf) in kv_pairs {
if key == key_in_leaf {
@ -206,7 +207,7 @@ impl SmtLeaf {
}
Some(EMPTY_WORD)
}
},
}
}
@ -219,7 +220,7 @@ impl SmtLeaf {
SmtLeaf::Empty(_) => {
*self = SmtLeaf::new_single(key, value);
None
}
},
SmtLeaf::Single(kv_pair) => {
if kv_pair.0 == key {
// the key is already in this leaf. Update the value and return the previous
@ -237,7 +238,7 @@ impl SmtLeaf {
None
}
}
},
SmtLeaf::Multiple(kv_pairs) => {
match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
Ok(pos) => {
@ -245,14 +246,14 @@ impl SmtLeaf {
kv_pairs[pos].1 = value;
Some(old_value)
}
},
Err(pos) => {
kv_pairs.insert(pos, (key, value));
None
}
},
}
}
},
}
}
@ -277,7 +278,7 @@ impl SmtLeaf {
// another key is stored at leaf; nothing to update
(None, false)
}
}
},
SmtLeaf::Multiple(kv_pairs) => {
match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
Ok(pos) => {
@ -292,13 +293,13 @@ impl SmtLeaf {
}
(Some(old_value), false)
}
},
Err(_) => {
// other keys are stored at leaf; nothing to update
(None, false)
}
},
}
}
},
}
}
}

+ 8
- 7
src/merkle/smt/full/mod.rs

@ -1,13 +1,14 @@
use super::{
EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
};
use alloc::{
collections::{BTreeMap, BTreeSet},
string::ToString,
vec::Vec,
};
use super::{
EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
};
mod error;
pub use error::{SmtLeafError, SmtProofError};
@ -32,8 +33,8 @@ pub const SMT_DEPTH: u8 = 64;
/// Sparse Merkle tree mapping 256-bit keys to 256-bit values. Both keys and values are represented
/// by 4 field elements.
///
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf to
/// which the key maps.
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf
/// to which the key maps.
///
/// A leaf is either empty, or holds one or more key-value pairs. An empty leaf hashes to the empty
/// word. Otherwise, a leaf hashes to the hash of its key-value pairs, ordered by key first, value
@ -187,7 +188,7 @@ impl Smt {
self.leaves.insert(leaf_index.value(), SmtLeaf::Single((key, value)));
None
}
},
}
}

+ 3
- 2
src/merkle/smt/full/proof.rs

@ -1,6 +1,7 @@
use alloc::string::ToString;
use super::{MerklePath, RpoDigest, SmtLeaf, SmtProofError, Word, SMT_DEPTH};
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
use alloc::string::ToString;
/// A proof which can be used to assert membership (or non-membership) of key-value pairs in a
/// [`super::Smt`].
@ -57,7 +58,7 @@ impl SmtProof {
// make sure the Merkle path resolves to the correct root
self.compute_root() == *root
}
},
// If the key maps to a different leaf, the proof cannot verify membership of `value`
None => false,
}

+ 2
- 1
src/merkle/smt/full/tests.rs

@ -1,10 +1,11 @@
use alloc::vec::Vec;
use super::{Felt, LeafIndex, NodeIndex, Rpo256, RpoDigest, Smt, SmtLeaf, EMPTY_WORD, SMT_DEPTH};
use crate::{
merkle::{EmptySubtreeRoots, MerkleStore},
utils::{Deserializable, Serializable},
Word, ONE, WORD_SIZE,
};
use alloc::vec::Vec;
// SMT
// --------------------------------------------------------------------------------------------

+ 2
- 1
src/merkle/smt/mod.rs

@ -1,9 +1,10 @@
use alloc::vec::Vec;
use super::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, NodeIndex};
use crate::{
hash::rpo::{Rpo256, RpoDigest},
Felt, Word, EMPTY_WORD,
};
use alloc::vec::Vec;
mod full;
pub use full::{Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError, SMT_DEPTH};

+ 2
- 1
src/merkle/smt/simple/mod.rs

@ -1,9 +1,10 @@
use alloc::collections::{BTreeMap, BTreeSet};
use super::{
super::ValuePath, EmptySubtreeRoots, InnerNode, InnerNodeInfo, LeafIndex, MerkleError,
MerklePath, NodeIndex, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD, SMT_MAX_DEPTH,
SMT_MIN_DEPTH,
};
use alloc::collections::{BTreeMap, BTreeSet};
#[cfg(test)]
mod tests;

+ 2
- 1
src/merkle/smt/simple/tests.rs

@ -1,3 +1,5 @@
use alloc::vec::Vec;
use super::{
super::{MerkleError, RpoDigest, SimpleSmt},
NodeIndex,
@ -10,7 +12,6 @@ use crate::{
},
Word, EMPTY_WORD,
};
use alloc::vec::Vec;
// TEST DATA
// ================================================================================================

+ 6
- 6
src/merkle/store/mod.rs

@ -127,8 +127,8 @@ impl> MerkleStore {
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
/// the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn get_node(&self, root: RpoDigest, index: NodeIndex) -> Result<RpoDigest, MerkleError> {
let mut hash = root;
@ -152,8 +152,8 @@ impl> MerkleStore {
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
/// the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn get_path(&self, root: RpoDigest, index: NodeIndex) -> Result<ValuePath, MerkleError> {
let mut hash = root;
let mut path = Vec::with_capacity(index.depth().into());
@ -421,8 +421,8 @@ impl> MerkleStore {
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
/// the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn set_node(
&mut self,
mut root: RpoDigest,

+ 6
- 7
src/merkle/store/tests.rs

@ -1,4 +1,10 @@
use seq_macro::seq;
#[cfg(feature = "std")]
use {
super::{Deserializable, Serializable},
alloc::boxed::Box,
std::error::Error,
};
use super::{
DefaultMerkleStore as MerkleStore, EmptySubtreeRoots, MerkleError, MerklePath, NodeIndex,
@ -11,13 +17,6 @@ use crate::{
Felt, Word, ONE, WORD_SIZE, ZERO,
};
#[cfg(feature = "std")]
use {
super::{Deserializable, Serializable},
alloc::boxed::Box,
std::error::Error,
};
// TEST DATA
// ================================================================================================

+ 6
- 4
src/rand/rpo.rs

@ -1,10 +1,12 @@
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;
use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
use crate::{
hash::rpo::{Rpo256, RpoDigest},
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;
// CONSTANTS
// ================================================================================================
@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpo256::RATE_RANGE.end - Rpo256::RATE_RANGE.star
/// described in <https://eprint.iacr.org/2011/499.pdf>.
///
/// The simplification is related to the following facts:
/// 1. A call to the reseed method implies one and only one call to the permutation function.
/// This is possible because in our case we never reseed with more than 4 field elements.
/// 1. A call to the reseed method implies one and only one call to the permutation function. This
/// is possible because in our case we never reseed with more than 4 field elements.
/// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
/// material.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]

+ 6
- 4
src/rand/rpx.rs

@ -1,10 +1,12 @@
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;
use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
use crate::{
hash::rpx::{Rpx256, RpxDigest},
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;
// CONSTANTS
// ================================================================================================
@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpx256::RATE_RANGE.end - Rpx256::RATE_RANGE.star
/// described in <https://eprint.iacr.org/2011/499.pdf>.
///
/// The simplification is related to the following facts:
/// 1. A call to the reseed method implies one and only one call to the permutation function.
/// This is possible because in our case we never reseed with more than 4 field elements.
/// 1. A call to the reseed method implies one and only one call to the permutation function. This
/// is possible because in our case we never reseed with more than 4 field elements.
/// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
/// material.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]

+ 4
- 3
src/utils/kv_map.rs

@ -62,8 +62,8 @@ impl KvMap for BTreeMap {
/// The [RecordingMap] is composed of three parts:
/// - `data`: which contains the current set of key-value pairs in the map.
/// - `updates`: which tracks keys for which values have been changed since the map was
/// instantiated. updates include both insertions, removals and updates of values under existing
/// keys.
/// instantiated. updates include both insertions, removals and updates of values under existing
/// keys.
/// - `trace`: which contains the key-value pairs from the original data which have been accesses
/// since the map was instantiated.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
@ -325,7 +325,8 @@ mod tests {
let mut map = RecordingMap::new(ITEMS.to_vec());
assert!(map.iter().all(|(x, y)| ITEMS.contains(&(*x, *y))));
// when inserting entry with key that already exists the iterator should return the new value
// when inserting entry with key that already exists the iterator should return the new
// value
let new_value = 5;
map.insert(4, new_value);
assert_eq!(map.iter().count(), ITEMS.len());

+ 4
- 4
src/utils/mod.rs

@ -59,16 +59,16 @@ impl Display for HexParseError {
match self {
HexParseError::InvalidLength { expected, actual } => {
write!(f, "Expected hex data to have length {expected}, including the 0x prefix. Got {actual}")
}
},
HexParseError::MissingPrefix => {
write!(f, "Hex encoded data must start with 0x prefix")
}
},
HexParseError::InvalidChar => {
write!(f, "Hex encoded data must contain characters [a-zA-Z0-9]")
}
},
HexParseError::OutOfRange => {
write!(f, "Hex encoded values of an RpoDigest must be inside the field modulus")
}
},
}
}
}

Loading…
Cancel
Save