9 Commits

Author SHA1 Message Date
Al-Kindi-0
d2a6739605 feat: derandomize RPO-STARK DSA (#358) 2025-01-08 11:42:23 -08:00
Al-Kindi-0
cae87a2790 chore: add signatures benchmarks (#354) 2024-12-12 19:58:33 -08:00
Al-Kindi-0
335c50f54d feat: implement RPO STARK-based signature DSA (with zero knowledge) (#349) 2024-12-12 19:33:24 -08:00
Qyriad
b151773b0d feat: implement concurrent Smt construction (#341)
* merkle: add parent() helper function on NodeIndex
* smt: add pairs_to_leaf() to trait
* smt: add sorted_pairs_to_leaves() and test for it
* smt: implement single subtree-8 hashing, w/ benchmarks & tests

This will be composed into depth-8-subtree-based computation of entire
sparse Merkle trees.

* merkle: add a benchmark for constructing 256-balanced trees

This is intended for comparison with the benchmarks from the previous
commit. This benchmark represents the theoretical perfect-efficiency
performance we could possibly (but impractically) get for computing
depth-8 sparse Merkle subtrees.

* smt: test that SparseMerkleTree::build_subtree() is composable

* smt: test that subtree logic can correctly construct an entire tree

This commit ensures that `SparseMerkleTree::build_subtree()` can
correctly compose into building an entire sparse Merkle tree, without
yet getting into potential complications concurrency introduces.

* smt: implement test for basic parallelized subtree computation w/ rayon

Building on the previous commit, this commit implements a test proving
that `SparseMerkleTree::build_subtree()` can be composed into itself not
just concurrently, but in parallel, without issue.

* smt: add from_raw_parts() to trait interface

This commit adds a new required method to the SparseMerkleTree trait,
to allow generic construction from pre-computed parts.

This will be used to add a generic version of `with_entries()` in a
later commit.

* smt: add parallel constructors to Smt and SimpleSmt

What the previous few commits have been leading up to: SparseMerkleTree
now has a function to construct the tree from existing data in parallel.
This is significantly faster than the single-threaded equivalent.
Benchmarks incoming!
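
For context, the public entry point this work lands in is `Smt::with_entries()`, which dispatches to the parallel subtree construction when the `concurrent` feature is enabled. A minimal usage sketch (entry values are arbitrary; based on the benchmarks added here):

```rust
use miden_crypto::{hash::rpo::RpoDigest, merkle::Smt, Felt, Word, ONE};

fn main() {
    // With the `concurrent` feature enabled, construction runs the parallel
    // depth-8-subtree algorithm described above.
    let entries: Vec<(RpoDigest, Word)> = (0..4u64)
        .map(|i| {
            let key = RpoDigest::new([ONE, ONE, Felt::new(i), Felt::new(i)]);
            let value: Word = [Felt::new(i), ONE, ONE, ONE];
            (key, value)
        })
        .collect();
    let smt = Smt::with_entries(entries).unwrap();
    println!("root = {:?}", smt.root());
}
```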

---------

Co-authored-by: krushimir <krushimir@reilabs.co>
Co-authored-by: krushimir <kresimir.grofelnik@reilabs.io>
2024-12-04 10:54:41 -08:00
Bobbin Threadbare
1867f842d3 chore: update changelog 2024-11-24 22:26:51 -08:00
Al-Kindi-0
e1072ecc7f chore: update winterfell dependencies to 0.11 (#346) 2024-11-24 22:20:19 -08:00
Bobbin Threadbare
063ad49afd chore: update crate version to v0.13.0 2024-11-21 15:56:55 -08:00
Philipp Gackstatter
a27f9ad828 refactor: use thiserror to derive errors and update error messages (#344) 2024-11-21 15:52:20 -08:00
Al-Kindi-0
50dd6bda19 fix: skip using the field element containing the proof-of-work (#343) 2024-11-18 00:16:27 -08:00
42 changed files with 2642 additions and 444 deletions


@@ -1,6 +1,16 @@
## 0.11.0 (2024-10-30)
## 0.13.0 (2024-11-24)
- Fixed a bug in the implementation of `draw_integers` for `RpoRandomCoin` (#343).
- [BREAKING] Refactor error messages and use `thiserror` to derive errors (#344).
- [BREAKING] Updated Winterfell dependency to v0.11 (#346).
- Added RPO-STARK based DSA (#349).
- Added benchmarks for DSA implementations (#354).
- Implemented deterministic RPO-STARK based DSA (#358).
## 0.12.0 (2024-10-30)
- [BREAKING] Updated Winterfell dependency to v0.10 (#338).
- Added parallel implementation of `Smt::with_entries()` with significantly better performance when the `concurrent` feature is enabled (#341).
## 0.11.0 (2024-10-17)

Cargo.lock (generated)

@@ -19,9 +19,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
version = "0.6.17"
version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338"
checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -34,9 +34,9 @@ dependencies = [
[[package]]
name = "anstyle"
version = "1.0.9"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "anstyle-parse"
@@ -78,6 +78,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "assert_matches"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
[[package]]
name = "autocfg"
version = "1.4.0"
@@ -107,9 +113,9 @@ checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "blake3"
version = "1.5.4"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7"
checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e"
dependencies = [
"arrayref",
"arrayvec",
@@ -147,9 +153,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.1.31"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f"
checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d"
dependencies = [
"jobserver",
"libc",
@@ -191,9 +197,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.20"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8"
checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84"
dependencies = [
"clap_builder",
"clap_derive",
@@ -201,9 +207,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.20"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838"
dependencies = [
"anstream",
"anstyle",
@@ -225,9 +231,9 @@ dependencies = [
[[package]]
name = "clap_lex"
version = "0.7.2"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "colorchoice"
@@ -243,9 +249,9 @@ checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
[[package]]
name = "cpufeatures"
version = "0.2.14"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
dependencies = [
"libc",
]
@@ -345,19 +351,19 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
[[package]]
name = "errno"
version = "0.3.9"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
"libc",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
name = "fastrand"
version = "2.1.1"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "fnv"
@@ -450,9 +456,9 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.11"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "jobserver"
@@ -465,10 +471,11 @@ dependencies = [
[[package]]
name = "js-sys"
version = "0.3.72"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
dependencies = [
"once_cell",
"wasm-bindgen",
]
@@ -489,9 +496,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.161"
version = "0.2.168"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"
[[package]]
name = "libm"
@@ -519,8 +526,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "miden-crypto"
version = "0.12.0"
version = "0.14.0"
dependencies = [
"assert_matches",
"blake3",
"cc",
"clap",
@@ -534,13 +542,18 @@ dependencies = [
"rand",
"rand_chacha",
"rand_core",
"rayon",
"seq-macro",
"serde",
"sha3",
"thiserror",
"winter-air",
"winter-crypto",
"winter-math",
"winter-prover",
"winter-rand-utils",
"winter-utils",
"winter-verifier",
]
[[package]]
@@ -629,6 +642,12 @@ version = "11.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
[[package]]
name = "pin-project-lite"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
[[package]]
name = "plotters"
version = "0.3.7"
@@ -668,9 +687,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.89"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
@@ -783,9 +802,9 @@ dependencies = [
[[package]]
name = "regex-automata"
version = "0.4.8"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
@@ -800,15 +819,15 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustix"
version = "0.38.38"
version = "0.38.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a"
checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -846,18 +865,18 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
version = "1.0.214"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5"
checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.214"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766"
checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
dependencies = [
"proc-macro2",
"quote",
@@ -866,9 +885,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.132"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
"itoa",
"memchr",
@@ -900,9 +919,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
version = "2.0.85"
version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56"
checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
"proc-macro2",
"quote",
@@ -911,9 +930,9 @@ dependencies = [
[[package]]
name = "tempfile"
version = "3.13.0"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b"
checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
dependencies = [
"cfg-if",
"fastrand",
@@ -922,6 +941,26 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "thiserror"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
@@ -932,6 +971,34 @@ dependencies = [
"serde_json",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
[[package]]
name = "typenum"
version = "1.17.0"
@@ -946,9 +1013,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
[[package]]
name = "unicode-ident"
version = "1.0.13"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "utf8parse"
@@ -989,9 +1056,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
dependencies = [
"cfg-if",
"once_cell",
@@ -1000,13 +1067,12 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn",
@@ -1015,9 +1081,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -1025,9 +1091,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
dependencies = [
"proc-macro2",
"quote",
@@ -1038,15 +1104,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"
[[package]]
name = "web-sys"
version = "0.3.72"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112"
checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -1143,33 +1209,82 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winter-air"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"libm",
"winter-crypto",
"winter-fri",
"winter-math",
"winter-utils",
]
[[package]]
name = "winter-crypto"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "163da45f1d4d65cac361b8df4835a6daa95b3399154e16eb0305c178c6f6c1f4"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"blake3",
"rand",
"rand_chacha",
"sha3",
"winter-math",
"winter-utils",
]
[[package]]
name = "winter-fri"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"rand",
"rand_chacha",
"winter-crypto",
"winter-math",
"winter-utils",
]
[[package]]
name = "winter-math"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a8ba832121679e79b004b0003018c85873956d742a39c348c247f680fe15e00"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"serde",
"winter-utils",
]
[[package]]
name = "winter-maybe-async"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"quote",
"syn",
]
[[package]]
name = "winter-prover"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"rand",
"rand_chacha",
"tracing",
"winter-air",
"winter-crypto",
"winter-fri",
"winter-math",
"winter-maybe-async",
"winter-rand-utils",
"winter-utils",
]
[[package]]
name = "winter-rand-utils"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a7616d11fcc26552dada45c803a884ac97c253218835b83a2c63e1c2a988639"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"rand",
"winter-utils",
@@ -1177,9 +1292,20 @@ dependencies = [
[[package]]
name = "winter-utils"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76b116c8ade0172506f8bda32dc674cf6b230adc8516e5138a0173ae69158a4f"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
[[package]]
name = "winter-verifier"
version = "0.11.0"
source = "git+https://github.com/Al-Kindi-0/winterfell?branch=al-zk#5bafedbc2ba00cf85c6182725754547f6cddafc3"
dependencies = [
"winter-air",
"winter-crypto",
"winter-fri",
"winter-math",
"winter-utils",
]
[[package]]
name = "zerocopy"


@@ -1,12 +1,12 @@
[package]
name = "miden-crypto"
version = "0.12.0"
version = "0.14.0"
description = "Miden Cryptographic primitives"
authors = ["miden contributors"]
readme = "README.md"
license = "MIT"
repository = "https://github.com/0xPolygonMiden/crypto"
documentation = "https://docs.rs/miden-crypto/0.12.0"
documentation = "https://docs.rs/miden-crypto/0.14.0"
categories = ["cryptography", "no-std"]
keywords = ["miden", "crypto", "hash", "merkle"]
edition = "2021"
@@ -19,6 +19,10 @@ bench = false
doctest = false
required-features = ["executable"]
[[bench]]
name = "dsa"
harness = false
[[bench]]
name = "hash"
harness = false
@@ -27,13 +31,28 @@ harness = false
name = "smt"
harness = false
[[bench]]
name = "smt-subtree"
harness = false
required-features = ["internal"]
[[bench]]
name = "merkle"
harness = false
[[bench]]
name = "smt-with-entries"
harness = false
[[bench]]
name = "store"
harness = false
[features]
default = ["std"]
concurrent = ["dep:rayon"]
default = ["std", "concurrent"]
executable = ["dep:clap", "dep:rand-utils", "std"]
internal = []
serde = ["dep:serde", "serde?/alloc", "winter-math/serde"]
std = [
"blake3/std",
@@ -48,26 +67,32 @@ std = [
[dependencies]
blake3 = { version = "1.5", default-features = false }
clap = { version = "4.5", optional = true, features = ["derive"] }
getrandom = { version = "0.2", features = ["js"] }
num = { version = "0.4", default-features = false, features = ["alloc", "libm"] }
num-complex = { version = "0.4", default-features = false }
rand = { version = "0.8", default-features = false }
rand_chacha = { version = "0.3", default-features = false }
rand_core = { version = "0.6", default-features = false }
rand-utils = { version = "0.10", package = "winter-rand-utils", optional = true }
rand-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', package = "winter-rand-utils" , branch = 'al-zk', optional = true }
rayon = { version = "1.10", optional = true }
serde = { version = "1.0", default-features = false, optional = true, features = ["derive"] }
sha3 = { version = "0.10", default-features = false }
winter-crypto = { version = "0.10", default-features = false }
winter-math = { version = "0.10", default-features = false }
winter-utils = { version = "0.10", default-features = false }
thiserror = { version = "2.0", default-features = false }
winter-air = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-crypto = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-prover = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-verifier = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-math = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
winter-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', branch = 'al-zk' }
[dev-dependencies]
assert_matches = { version = "1.5", default-features = false }
criterion = { version = "0.5", features = ["html_reports"] }
getrandom = { version = "0.2", features = ["js"] }
hex = { version = "0.4", default-features = false, features = ["alloc"] }
proptest = "1.5"
rand_chacha = { version = "0.3", default-features = false }
rand-utils = { version = "0.10", package = "winter-rand-utils" }
rand-utils = {git = 'https://github.com/Al-Kindi-0/winterfell', package = "winter-rand-utils" , branch = 'al-zk' }
seq-macro = { version = "0.3" }
[build-dependencies]
cc = { version = "1.1", optional = true, features = ["parallel"] }
cc = { version = "1.2", optional = true, features = ["parallel"] }
glob = "0.3"


@@ -83,4 +83,4 @@ build-sve: ## Build with sve support
.PHONY: bench-tx
bench-tx: ## Run crypto benchmarks
cargo bench
cargo bench --features="concurrent"


@@ -60,10 +60,11 @@ make
This crate can be compiled with the following features:
- `concurrent` - enabled by default; enables a multi-threaded implementation of `Smt::with_entries()`, which significantly improves performance on multi-core CPUs.
- `std` - enabled by default and relies on the Rust standard library.
- `no_std` - does not rely on the Rust standard library and enables compilation to WebAssembly.
Both of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.
All of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.
To compile with `no_std`, disable default features via `--no-default-features` flag or using the following command:


@@ -1,4 +1,6 @@
# Miden VM Hash Functions
# Benchmarks
## Miden VM Hash Functions
In the Miden VM, we make use of different hash functions. Some of these are "traditional" hash functions, like `BLAKE3`, which are optimized for out-of-STARK performance, while others are algebraic hash functions, like `Rescue Prime`, which are optimized for performance inside the STARK. In what follows, we benchmark several such hash functions and compare them against constructions used by other proving systems. More precisely, we benchmark:
* **BLAKE3** as specified [here](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) and implemented [here](https://github.com/BLAKE3-team/BLAKE3) (with a wrapper exposed via this crate).
@@ -8,13 +10,13 @@ In the Miden VM, we make use of different hash functions. Some of these are "tra
* **Rescue Prime Optimized (RPO)** as specified [here](https://eprint.iacr.org/2022/1577) and implemented in this crate.
* **Rescue Prime Extended (RPX)** a variant of the [xHash](https://eprint.iacr.org/2023/1045) hash function as implemented in this crate.
## Comparison and Instructions
### Comparison and Instructions
### Comparison
#### Comparison
We benchmark the above hash functions using two scenarios. The first is 2-to-1 $(a,b)\mapsto h(a,b)$ hashing, where $a$, $b$, and $h(a,b)$ are digests corresponding to each of the hash functions.
The second scenario is sequential hashing, where we take a sequence of $100$ field elements and hash it to produce a single digest. The digests are $4$ field elements in a prime field with modulus $2^{64} - 2^{32} + 1$ (i.e., 32 bytes) for Poseidon, Rescue Prime and RPO, and an array `[u8; 32]` for SHA3 and BLAKE3.
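
A sketch of what the two scenarios look like against this crate's API, using the `Rpo256` hasher (`merge` and `hash_elements` are this crate's standard hashing entry points; the input values here are arbitrary):

```rust
use miden_crypto::{hash::rpo::Rpo256, Felt, ONE};

fn main() {
    // Scenario 1: 2-to-1 hashing h(a, b), where a and b are RPO digests.
    let a = Rpo256::hash_elements(&[ONE; 4]);
    let b = Rpo256::hash_elements(&[ONE; 4]);
    let h_ab = Rpo256::merge(&[a, b]);

    // Scenario 2: sequential hashing of 100 field elements into one digest.
    let elements: Vec<Felt> = (0..100u64).map(Felt::new).collect();
    let h_seq = Rpo256::hash_elements(&elements);

    println!("{h_ab:?} {h_seq:?}");
}
```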
#### Scenario 1: 2-to-1 hashing `h(a,b)`
##### Scenario 1: 2-to-1 hashing `h(a,b)`
| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | RPX_256 |
| ------------------- | ------ | ------- | --------- | --------- | ------- | ------- |
@@ -26,7 +28,7 @@ The second scenario is that of sequential hashing where we take a sequence of le
| Intel Core i5-8279U | 68 ns | 536 ns | 2.0 µs | 13.6 µs | 8.5 µs | 4.4 µs |
| Intel Xeon 8375C | 67 ns | | | | 8.2 µs | |
#### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])`
##### Scenario 2: Sequential hashing of 100 elements `h([a_0,...,a_99])`
| Function | BLAKE3 | SHA3 | Poseidon | Rp64_256 | RPO_256 | RPX_256 |
| ------------------- | -------| ------- | --------- | --------- | ------- | ------- |
@@ -42,7 +44,7 @@ Notes:
- On Graviton 3, RPO256 and RPX256 are run with SVE acceleration enabled.
- On AMD EPYC 9R14, RPO256 and RPX256 are run with AVX2 acceleration enabled.
### Instructions
#### Instructions
Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). After that, to run the benchmarks for RPO and BLAKE3, clone the current repository, and from the root directory of the repo run the following:
```
@@ -54,3 +56,47 @@ To run the benchmarks for Rescue Prime, Poseidon and SHA3, clone the following [
```
cargo bench hash
```
## Miden VM DSA
We make use of the following digital signature algorithms (DSA) in the Miden VM:
* **RPO-Falcon512** as specified [here](https://falcon-sign.info/falcon.pdf) with the one difference being the use of the RPO hash function for the hash-to-point algorithm (Algorithm 3 in the previous reference) instead of SHAKE256.
* **RPO-STARK** as specified [here](https://eprint.iacr.org/2024/1553), where the parameters are the ones for the unique-decoding regime (UDR), with two differences:
  * We rely on Conjecture 1 in the [ethSTARK](https://eprint.iacr.org/2021/582) paper.
  * The number of FRI queries is $30$ and the grinding factor is $12$ bits. Thus, using the previous point, we can argue that the modified version achieves at least $102$ bits of average-case existential unforgeability security against $2^{113}$-query bound adversaries that can obtain up to $2^{64}$ signatures under the same public key.
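
As a rough sanity check of the $102$-bit figure (under the ethSTARK conjecture, counting only the FRI-query and grinding contributions, and using the scheme's blowup factor of $8$):

$$30 \cdot \log_2(8) + 12 = 90 + 12 = 102 \text{ bits}.$$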
### Comparison and Instructions
#### Comparison
##### Key Generation
| DSA | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro | 590 ms | 6 µs |
| Intel Core i5-8279U | 585 ms | 10 µs |
##### Signature Generation
| DSA | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro | 1.5 ms | 78 ms |
| Intel Core i5-8279U | 1.8 ms | 130 ms |
##### Signature Verification
| DSA | RPO-Falcon512 | RPO-STARK |
| ------------------- | :-----------: | :-------: |
| Apple M1 Pro | 0.7 ms | 4.5 ms |
| Intel Core i5-8279U | 1.2 ms | 7.9 ms |
#### Instructions
Before you can run the benchmarks, you'll need to make sure you have Rust [installed](https://www.rust-lang.org/tools/install). After that, to run the benchmarks, clone the current repository, and from the root directory of the repo run the following:
```
cargo bench --bench dsa
```

benches/dsa.rs (new file)

@@ -0,0 +1,88 @@
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use miden_crypto::dsa::{
rpo_falcon512::SecretKey as FalconSecretKey, rpo_stark::SecretKey as RpoStarkSecretKey,
};
use rand_utils::rand_array;
fn key_gen_falcon(c: &mut Criterion) {
c.bench_function("Falcon public key generation", |bench| {
bench.iter_batched(|| FalconSecretKey::new(), |sk| sk.public_key(), BatchSize::SmallInput)
});
c.bench_function("Falcon secret key generation", |bench| {
bench.iter_batched(|| {}, |_| FalconSecretKey::new(), BatchSize::SmallInput)
});
}
fn key_gen_rpo_stark(c: &mut Criterion) {
c.bench_function("RPO-STARK public key generation", |bench| {
bench.iter_batched(
|| RpoStarkSecretKey::random(),
|sk| sk.public_key(),
BatchSize::SmallInput,
)
});
c.bench_function("RPO-STARK secret key generation", |bench| {
bench.iter_batched(|| {}, |_| RpoStarkSecretKey::random(), BatchSize::SmallInput)
});
}
fn signature_gen_falcon(c: &mut Criterion) {
c.bench_function("Falcon signature generation", |bench| {
bench.iter_batched(
|| (FalconSecretKey::new(), rand_array().into()),
|(sk, msg)| sk.sign(msg),
BatchSize::SmallInput,
)
});
}
fn signature_gen_rpo_stark(c: &mut Criterion) {
c.bench_function("RPO-STARK signature generation", |bench| {
bench.iter_batched(
|| (RpoStarkSecretKey::random(), rand_array().into()),
|(sk, msg)| sk.sign(msg),
BatchSize::SmallInput,
)
});
}
fn signature_ver_falcon(c: &mut Criterion) {
c.bench_function("Falcon signature verification", |bench| {
bench.iter_batched(
|| {
let sk = FalconSecretKey::new();
let msg = rand_array().into();
(sk.public_key(), msg, sk.sign(msg))
},
|(pk, msg, sig)| pk.verify(msg, &sig),
BatchSize::SmallInput,
)
});
}
fn signature_ver_rpo_stark(c: &mut Criterion) {
c.bench_function("RPO-STARK signature verification", |bench| {
bench.iter_batched(
|| {
let sk = RpoStarkSecretKey::random();
let msg = rand_array().into();
(sk.public_key(), msg, sk.sign(msg))
},
|(pk, msg, sig)| pk.verify(msg, &sig),
BatchSize::SmallInput,
)
});
}
criterion_group!(
dsa_group,
key_gen_falcon,
key_gen_rpo_stark,
signature_gen_falcon,
signature_gen_rpo_stark,
signature_ver_falcon,
signature_ver_rpo_stark
);
criterion_main!(dsa_group);

benches/merkle.rs (new file)

@@ -0,0 +1,66 @@
//! Benchmark for building a [`miden_crypto::merkle::MerkleTree`]. This is intended to be compared
//! with the results from `benches/smt-subtree.rs`, as building a fully balanced Merkle tree with
//! 256 leaves should indicate the *absolute best* performance we could *possibly* get for building
//! a depth-8 sparse Merkle subtree, though practically speaking building a fully balanced Merkle
//! tree will perform better than the sparse version. At the time of this writing (2024/11/24), this
//! benchmark is about four times more efficient than the equivalent benchmark in
//! `benches/smt-subtree.rs`.
use std::{hint, mem, time::Duration};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use miden_crypto::{merkle::MerkleTree, Felt, Word, ONE};
use rand_utils::prng_array;
fn balanced_merkle_even(c: &mut Criterion) {
c.bench_function("balanced-merkle-even", |b| {
b.iter_batched(
|| {
let entries: Vec<Word> =
(0..256).map(|i| [Felt::new(i), ONE, ONE, Felt::new(i)]).collect();
assert_eq!(entries.len(), 256);
entries
},
|leaves| {
let tree = MerkleTree::new(hint::black_box(leaves)).unwrap();
assert_eq!(tree.depth(), 8);
},
BatchSize::SmallInput,
);
});
}
fn balanced_merkle_rand(c: &mut Criterion) {
let mut seed = [0u8; 32];
c.bench_function("balanced-merkle-rand", |b| {
b.iter_batched(
|| {
let entries: Vec<Word> = (0..256).map(|_| generate_word(&mut seed)).collect();
assert_eq!(entries.len(), 256);
entries
},
|leaves| {
let tree = MerkleTree::new(hint::black_box(leaves)).unwrap();
assert_eq!(tree.depth(), 8);
},
BatchSize::SmallInput,
);
});
}
criterion_group! {
name = smt_subtree_group;
config = Criterion::default()
.measurement_time(Duration::from_secs(20))
.configure_from_args();
targets = balanced_merkle_even, balanced_merkle_rand
}
criterion_main!(smt_subtree_group);
// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------
fn generate_word(seed: &mut [u8; 32]) -> Word {
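// Advance the seed: overwrite it with a fresh PRNG output derived from its current value.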
mem::swap(seed, &mut prng_array(*seed));
let nums: [u64; 4] = prng_array(*seed);
[Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}

benches/smt-subtree.rs (new file)

@@ -0,0 +1,142 @@
use std::{fmt::Debug, hint, mem, time::Duration};
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use miden_crypto::{
hash::rpo::RpoDigest,
merkle::{build_subtree_for_bench, NodeIndex, SmtLeaf, SubtreeLeaf, SMT_DEPTH},
Felt, Word, ONE,
};
use rand_utils::prng_array;
use winter_utils::Randomizable;
const PAIR_COUNTS: [u64; 5] = [1, 64, 128, 192, 256];
fn smt_subtree_even(c: &mut Criterion) {
let mut seed = [0u8; 32];
let mut group = c.benchmark_group("subtree8-even");
for pair_count in PAIR_COUNTS {
let bench_id = BenchmarkId::from_parameter(pair_count);
group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
b.iter_batched(
|| {
// Setup.
let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
.map(|n| {
// A single depth-8 subtree can have a maximum of 255 leaves.
let leaf_index = ((n as f64 / pair_count as f64) * 255.0) as u64;
let key = RpoDigest::new([
generate_value(&mut seed),
ONE,
Felt::new(n),
Felt::new(leaf_index),
]);
let value = generate_word(&mut seed);
(key, value)
})
.collect();
let mut leaves: Vec<_> = entries
.iter()
.map(|(key, value)| {
let leaf = SmtLeaf::new_single(*key, *value);
let col = NodeIndex::from(leaf.index()).value();
let hash = leaf.hash();
SubtreeLeaf { col, hash }
})
.collect();
leaves.sort();
leaves.dedup_by_key(|leaf| leaf.col);
leaves
},
|leaves| {
// Benchmarked function.
let (subtree, _) = build_subtree_for_bench(
hint::black_box(leaves),
hint::black_box(SMT_DEPTH),
hint::black_box(SMT_DEPTH),
);
assert!(!subtree.is_empty());
},
BatchSize::SmallInput,
);
});
}
}
fn smt_subtree_random(c: &mut Criterion) {
let mut seed = [0u8; 32];
let mut group = c.benchmark_group("subtree8-rand");
for pair_count in PAIR_COUNTS {
let bench_id = BenchmarkId::from_parameter(pair_count);
group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
b.iter_batched(
|| {
// Setup.
let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
.map(|i| {
let leaf_index: u8 = generate_value(&mut seed);
let key = RpoDigest::new([
ONE,
ONE,
Felt::new(i),
Felt::new(leaf_index as u64),
]);
let value = generate_word(&mut seed);
(key, value)
})
.collect();
let mut leaves: Vec<_> = entries
.iter()
.map(|(key, value)| {
let leaf = SmtLeaf::new_single(*key, *value);
let col = NodeIndex::from(leaf.index()).value();
let hash = leaf.hash();
SubtreeLeaf { col, hash }
})
.collect();
leaves.sort();
leaves
},
|leaves| {
let (subtree, _) = build_subtree_for_bench(
hint::black_box(leaves),
hint::black_box(SMT_DEPTH),
hint::black_box(SMT_DEPTH),
);
assert!(!subtree.is_empty());
},
BatchSize::SmallInput,
);
});
}
}
criterion_group! {
name = smt_subtree_group;
config = Criterion::default()
.measurement_time(Duration::from_secs(40))
.sample_size(60)
.configure_from_args();
targets = smt_subtree_even, smt_subtree_random
}
criterion_main!(smt_subtree_group);
// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------
fn generate_value<T: Copy + Debug + Randomizable>(seed: &mut [u8; 32]) -> T {
mem::swap(seed, &mut prng_array(*seed));
let value: [T; 1] = rand_utils::prng_array(*seed);
value[0]
}
fn generate_word(seed: &mut [u8; 32]) -> Word {
mem::swap(seed, &mut prng_array(*seed));
let nums: [u64; 4] = prng_array(*seed);
[Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}


@@ -0,0 +1,71 @@
use std::{fmt::Debug, hint, mem, time::Duration};
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use miden_crypto::{hash::rpo::RpoDigest, merkle::Smt, Felt, Word, ONE};
use rand_utils::prng_array;
use winter_utils::Randomizable;
// 2^0, 2^4, 2^8, 2^12, 2^16, 2^20
const PAIR_COUNTS: [u64; 6] = [1, 16, 256, 4096, 65536, 1_048_576];
fn smt_with_entries(c: &mut Criterion) {
let mut seed = [0u8; 32];
let mut group = c.benchmark_group("smt-with-entries");
for pair_count in PAIR_COUNTS {
let bench_id = BenchmarkId::from_parameter(pair_count);
group.bench_with_input(bench_id, &pair_count, |b, &pair_count| {
b.iter_batched(
|| {
// Setup.
prepare_entries(pair_count, &mut seed)
},
|entries| {
// Benchmarked function.
Smt::with_entries(hint::black_box(entries)).unwrap();
},
BatchSize::SmallInput,
);
});
}
}
criterion_group! {
name = smt_with_entries_group;
config = Criterion::default()
//.measurement_time(Duration::from_secs(960))
.measurement_time(Duration::from_secs(60))
.sample_size(10)
.configure_from_args();
targets = smt_with_entries
}
criterion_main!(smt_with_entries_group);
// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------
fn prepare_entries(pair_count: u64, seed: &mut [u8; 32]) -> Vec<(RpoDigest, [Felt; 4])> {
let entries: Vec<(RpoDigest, Word)> = (0..pair_count)
.map(|i| {
let count = pair_count as f64;
let idx = ((i as f64 / count) * (count)) as u64;
let key = RpoDigest::new([generate_value(seed), ONE, Felt::new(i), Felt::new(idx)]);
let value = generate_word(seed);
(key, value)
})
.collect();
entries
}
fn generate_value<T: Copy + Debug + Randomizable>(seed: &mut [u8; 32]) -> T {
mem::swap(seed, &mut prng_array(*seed));
let value: [T; 1] = rand_utils::prng_array(*seed);
value[0]
}
fn generate_word(seed: &mut [u8; 32]) -> Word {
mem::swap(seed, &mut prng_array(*seed));
let nums: [u64; 4] = prng_array(*seed);
[Felt::new(nums[0]), Felt::new(nums[1]), Felt::new(nums[2]), Felt::new(nums[3])]
}


@@ -1,3 +1,5 @@
//! Digital signature schemes supported by default in the Miden VM.
pub mod rpo_falcon512;
pub mod rpo_stark;

src/dsa/rpo_stark/mod.rs (new file)

@@ -0,0 +1,24 @@
mod signature;
pub use signature::{PublicKey, SecretKey, Signature};
mod stark;
pub use stark::{PublicInputs, RescueAir};
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use super::SecretKey;
use crate::Word;
#[test]
fn test_signature() {
let sk = SecretKey::new(Word::default());
let message = Word::default();
let signature = sk.sign(message);
let pk = sk.public_key();
assert!(pk.verify(message, &signature))
}
}


@@ -0,0 +1,173 @@
use rand::{distributions::Uniform, prelude::Distribution, Rng};
use winter_air::{FieldExtension, ProofOptions};
use winter_math::{fields::f64::BaseElement, FieldElement};
use winter_prover::Proof;
use winter_utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
use crate::{
dsa::rpo_stark::stark::RpoSignatureScheme,
hash::{rpo::Rpo256, DIGEST_SIZE},
StarkField, Word, ZERO,
};
// CONSTANTS
// ================================================================================================
/// Specifies the parameters of the STARK underlying the signature scheme. These parameters provide
/// at least 102 bits of security under the conjectured security of the toy protocol in
/// the ethSTARK paper [1].
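///
/// Concretely, the options encode 30 FRI queries, a blowup factor of 8, 12 bits of grinding,
/// a quadratic field extension, a FRI folding factor of 4, a maximum FRI remainder polynomial
/// degree of 7, and zero-knowledge enabled (cf. the context string in `generate_seed`).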
///
/// [1]: https://eprint.iacr.org/2021/582
pub const PROOF_OPTIONS: ProofOptions =
ProofOptions::new(30, 8, 12, FieldExtension::Quadratic, 4, 7, true);
// PUBLIC KEY
// ================================================================================================
/// A public key for verifying signatures.
///
/// The public key is a [Word] (i.e., 4 field elements) that is the hash of the secret key.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PublicKey(Word);
impl PublicKey {
/// Returns the [Word] defining the public key.
pub fn inner(&self) -> Word {
self.0
}
}
impl PublicKey {
/// Verifies the provided signature against provided message and this public key.
pub fn verify(&self, message: Word, signature: &Signature) -> bool {
signature.verify(message, *self)
}
}
impl Serializable for PublicKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.0.write_into(target);
}
}
impl Deserializable for PublicKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let pk = <Word>::read_from(source)?;
Ok(Self(pk))
}
}
// SECRET KEY
// ================================================================================================
/// A secret key for generating signatures.
///
/// The secret key is a [Word] (i.e., 4 field elements).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SecretKey(Word);
impl SecretKey {
/// Generates a secret key from a [Word].
pub fn new(word: Word) -> Self {
Self(word)
}
/// Generates a secret key from OS-provided randomness.
#[cfg(feature = "std")]
pub fn random() -> Self {
use rand::{rngs::StdRng, SeedableRng};
let mut rng = StdRng::from_entropy();
Self::with_rng(&mut rng)
}
/// Generates a secret key using the provided random number generator.
pub fn with_rng<R: Rng>(rng: &mut R) -> Self {
let mut sk = [ZERO; 4];
let uni_dist = Uniform::from(0..BaseElement::MODULUS);
for s in sk.iter_mut() {
let sampled_integer = uni_dist.sample(rng);
*s = BaseElement::new(sampled_integer);
}
Self(sk)
}
/// Computes the public key corresponding to this secret key.
pub fn public_key(&self) -> PublicKey {
let mut elements = [BaseElement::ZERO; 8];
elements[..DIGEST_SIZE].copy_from_slice(&self.0);
let pk = Rpo256::hash_elements(&elements);
PublicKey(pk.into())
}
/// Signs a message with this secret key.
pub fn sign(&self, message: Word) -> Signature {
let signature: RpoSignatureScheme<Rpo256> = RpoSignatureScheme::new(PROOF_OPTIONS);
let proof = signature.sign(self.0, message);
Signature { proof }
}
}
impl Serializable for SecretKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.0.write_into(target);
}
}
impl Deserializable for SecretKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let sk = <Word>::read_from(source)?;
Ok(Self(sk))
}
}
// SIGNATURE
// ================================================================================================
/// An RPO STARK-based signature over a message.
///
/// The signature is a STARK proof of knowledge of a pre-image given an image where the map is
/// the RPO permutation, the pre-image is the secret key and the image is the public key.
/// The current implementation follows the description in [1] but relies on the conjectured
/// security of the toy protocol in the ethSTARK paper [2]. Using the parameter set given in
/// `PROOF_OPTIONS`, this yields a signature with $102$ bits of average-case existential
/// unforgeability security against $2^{113}$-query bound adversaries that can obtain up to
/// $2^{64}$ signatures under the same public key.
///
/// [1]: https://eprint.iacr.org/2024/1553
/// [2]: https://eprint.iacr.org/2021/582
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Signature {
proof: Proof,
}
impl Signature {
/// Returns the STARK proof constituting the signature.
pub fn inner(&self) -> Proof {
self.proof.clone()
}
/// Returns true if this signature is a valid signature for the specified message generated
/// against the secret key matching the specified public key.
pub fn verify(&self, message: Word, pk: PublicKey) -> bool {
let signature: RpoSignatureScheme<Rpo256> = RpoSignatureScheme::new(PROOF_OPTIONS);
let res = signature.verify(pk.inner(), message, self.proof.clone());
res.is_ok()
}
}
impl Serializable for Signature {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.proof.write_into(target);
}
}
impl Deserializable for Signature {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let proof = Proof::read_from(source)?;
Ok(Self { proof })
}
}
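
A minimal end-to-end sketch of the API above (sign, verify, and serialization round-trip; assumes the `std` feature for `SecretKey::random`):

```rust
use miden_crypto::{
    dsa::rpo_stark::{SecretKey, Signature},
    Word,
};
use winter_utils::{Deserializable, Serializable};

fn main() {
    // Key generation, signing, and verification.
    let sk = SecretKey::random();
    let msg = Word::default();
    let sig = sk.sign(msg);
    assert!(sk.public_key().verify(msg, &sig));

    // Signatures round-trip through the serialization traits.
    let bytes = sig.to_bytes();
    let restored = Signature::read_from_bytes(&bytes).unwrap();
    assert_eq!(sig, restored);
}
```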


@@ -0,0 +1,198 @@
use alloc::vec::Vec;
use winter_math::{fields::f64::BaseElement, FieldElement, ToElements};
use winter_prover::{
Air, AirContext, Assertion, EvaluationFrame, ProofOptions, TraceInfo,
TransitionConstraintDegree,
};
use crate::{
hash::{ARK1, ARK2, MDS, STATE_WIDTH},
Word, ZERO,
};
// CONSTANTS
// ================================================================================================
pub const HASH_CYCLE_LEN: usize = 8;
// AIR
// ================================================================================================
pub struct RescueAir {
context: AirContext<BaseElement>,
pub_key: Word,
}
impl Air for RescueAir {
type BaseField = BaseElement;
type PublicInputs = PublicInputs;
type GkrProof = ();
type GkrVerifier = ();
// CONSTRUCTOR
// --------------------------------------------------------------------------------------------
fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
let degrees = vec![
// Apply RPO rounds.
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
TransitionConstraintDegree::new(7),
];
assert_eq!(STATE_WIDTH, trace_info.width());
let context = AirContext::new(trace_info, degrees, 12, options);
let context = context.set_num_transition_exemptions(1);
RescueAir { context, pub_key: pub_inputs.pub_key }
}
fn context(&self) -> &AirContext<Self::BaseField> {
&self.context
}
fn evaluate_transition<E: FieldElement + From<Self::BaseField>>(
&self,
frame: &EvaluationFrame<E>,
periodic_values: &[E],
result: &mut [E],
) {
let current = frame.current();
let next = frame.next();
// expected state width is 12 field elements
debug_assert_eq!(STATE_WIDTH, current.len());
debug_assert_eq!(STATE_WIDTH, next.len());
enforce_rpo_round(frame, result, periodic_values);
}
fn get_assertions(&self) -> Vec<Assertion<Self::BaseField>> {
let initial_step = 0;
let last_step = self.trace_length() - 1;
vec![
// Assert that the capacity as well as the second half of the rate portion of the state
// are initialized to `ZERO`. The first half of the rate is unconstrained as it will
// contain the secret key.
Assertion::single(0, initial_step, Self::BaseField::ZERO),
Assertion::single(1, initial_step, Self::BaseField::ZERO),
Assertion::single(2, initial_step, Self::BaseField::ZERO),
Assertion::single(3, initial_step, Self::BaseField::ZERO),
Assertion::single(8, initial_step, Self::BaseField::ZERO),
Assertion::single(9, initial_step, Self::BaseField::ZERO),
Assertion::single(10, initial_step, Self::BaseField::ZERO),
Assertion::single(11, initial_step, Self::BaseField::ZERO),
// Assert that the public key is the correct one
Assertion::single(4, last_step, self.pub_key[0]),
Assertion::single(5, last_step, self.pub_key[1]),
Assertion::single(6, last_step, self.pub_key[2]),
Assertion::single(7, last_step, self.pub_key[3]),
]
}
fn get_periodic_column_values(&self) -> Vec<Vec<Self::BaseField>> {
get_round_constants()
}
}
pub struct PublicInputs {
pub(crate) pub_key: Word,
pub(crate) msg: Word,
}
impl PublicInputs {
pub fn new(pub_key: Word, msg: Word) -> Self {
Self { pub_key, msg }
}
}
impl ToElements<BaseElement> for PublicInputs {
fn to_elements(&self) -> Vec<BaseElement> {
let mut res = self.pub_key.to_vec();
res.extend_from_slice(self.msg.as_ref());
res
}
}
// HELPER EVALUATORS
// ------------------------------------------------------------------------------------------------
/// Enforces constraints for a single round of the Rescue Prime Optimized hash function.
pub fn enforce_rpo_round<E: FieldElement + From<BaseElement>>(
frame: &EvaluationFrame<E>,
result: &mut [E],
ark: &[E],
) {
// compute the state that should result from applying the first 5 operations of the RPO round to
// the current hash state.
let mut step1 = [E::ZERO; STATE_WIDTH];
step1.copy_from_slice(frame.current());
apply_mds(&mut step1);
// add constants
for i in 0..STATE_WIDTH {
step1[i] += ark[i];
}
apply_sbox(&mut step1);
apply_mds(&mut step1);
// add constants
for i in 0..STATE_WIDTH {
step1[i] += ark[STATE_WIDTH + i];
}
// compute the state that should result from applying the inverse of the last operation of the
// RPO round to the next step of the computation.
let mut step2 = [E::ZERO; STATE_WIDTH];
step2.copy_from_slice(frame.next());
apply_sbox(&mut step2);
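// In other words, with `M` the MDS matrix, `c1`/`c2` the first/second half of the round
// constants `ark`, and `x -> x^7` the S-box, the per-round constraint below is, element-wise:
//
//     next^7 = M * (M * current + c1)^7 + c2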
// make sure that the results are equal.
for i in 0..STATE_WIDTH {
result[i] = step2[i] - step1[i]
}
}
#[inline(always)]
fn apply_sbox<E: FieldElement + From<BaseElement>>(state: &mut [E; STATE_WIDTH]) {
state.iter_mut().for_each(|v| {
let t2 = v.square();
let t4 = t2.square();
*v *= t2 * t4;
});
}
#[inline(always)]
fn apply_mds<E: FieldElement + From<BaseElement>>(state: &mut [E; STATE_WIDTH]) {
let mut result = [E::ZERO; STATE_WIDTH];
result.iter_mut().zip(MDS).for_each(|(r, mds_row)| {
state.iter().zip(mds_row).for_each(|(&s, m)| {
*r += E::from(m) * s;
});
});
*state = result
}
/// Returns RPO round constants arranged in column-major form.
pub fn get_round_constants() -> Vec<Vec<BaseElement>> {
let mut constants = Vec::new();
for _ in 0..(STATE_WIDTH * 2) {
constants.push(vec![ZERO; HASH_CYCLE_LEN]);
}
#[allow(clippy::needless_range_loop)]
for i in 0..HASH_CYCLE_LEN - 1 {
for j in 0..STATE_WIDTH {
constants[j][i] = ARK1[i][j];
constants[j + STATE_WIDTH][i] = ARK2[i][j];
}
}
constants
}


@@ -0,0 +1,98 @@
use alloc::vec::Vec;
use core::marker::PhantomData;
use prover::RpoSignatureProver;
use rand_chacha::ChaCha20Rng;
use winter_crypto::{ElementHasher, SaltedMerkleTree};
use winter_math::fields::f64::BaseElement;
use winter_prover::{Proof, ProofOptions, Prover};
use winter_utils::Serializable;
use winter_verifier::{verify, AcceptableOptions, VerifierError};
use crate::{
hash::{rpo::Rpo256, DIGEST_SIZE},
rand::RpoRandomCoin,
};
mod air;
pub use air::{PublicInputs, RescueAir};
mod prover;
/// Represents an abstract STARK-based signature scheme with knowledge of an RPO pre-image as
/// the hard relation.
pub struct RpoSignatureScheme<H: ElementHasher> {
options: ProofOptions,
_h: PhantomData<H>,
}
impl<H: ElementHasher<BaseField = BaseElement> + Sync> RpoSignatureScheme<H> {
pub fn new(options: ProofOptions) -> Self {
RpoSignatureScheme { options, _h: PhantomData }
}
pub fn sign(&self, sk: [BaseElement; DIGEST_SIZE], msg: [BaseElement; DIGEST_SIZE]) -> Proof {
// create a prover
let prover = RpoSignatureProver::<H>::new(msg, self.options.clone());
// generate execution trace
let trace = prover.build_trace(sk);
// generate the initial seed for the PRNG used for zero-knowledge
let seed: [u8; 32] = generate_seed(sk, msg);
// generate the proof
prover.prove(trace, Some(seed)).expect("failed to generate the signature")
}
pub fn verify(
&self,
pub_key: [BaseElement; DIGEST_SIZE],
msg: [BaseElement; DIGEST_SIZE],
proof: Proof,
) -> Result<(), VerifierError> {
// we make sure that the parameters used in generating the proof match the expected ones
if *proof.options() != self.options {
return Err(VerifierError::UnacceptableProofOptions);
}
let pub_inputs = PublicInputs { pub_key, msg };
let acceptable_options = AcceptableOptions::OptionSet(vec![proof.options().clone()]);
verify::<RescueAir, Rpo256, RpoRandomCoin, SaltedMerkleTree<Rpo256, ChaCha20Rng>>(
proof,
pub_inputs,
&acceptable_options,
)
}
}
/// Deterministically generates a seed for the PRNG used for zero-knowledge.
///
/// This uses the argument described in [RFC 6979](https://datatracker.ietf.org/doc/html/rfc6979#section-3.5)
/// § 3.5 where the concatenation of the private key and the hashed message, i.e., sk || H(m), is
/// used in order to construct the initial seed of a PRNG.
///
/// Note that we also hash in a context string in order to domain-separate between different
/// instantiations of the signature scheme.
#[inline]
pub fn generate_seed(sk: [BaseElement; DIGEST_SIZE], msg: [BaseElement; DIGEST_SIZE]) -> [u8; 32] {
let context_bytes = "
Seed for PRNG used for Zero-knowledge in RPO-STARK signature scheme:
1. Version: Conjectured security
2. FRI queries: 30
3. Blowup factor: 8
4. Grinding bits: 12
5. Field extension degree: 2
6. FRI folding factor: 4
7. FRI remainder polynomial max degree: 7
"
.to_bytes();
let sk_bytes = sk.to_bytes();
let msg_bytes = msg.to_bytes();
let total_length = context_bytes.len() + sk_bytes.len() + msg_bytes.len();
let mut buffer = Vec::with_capacity(total_length);
buffer.extend_from_slice(&context_bytes);
buffer.extend_from_slice(&sk_bytes);
buffer.extend_from_slice(&msg_bytes);
blake3::hash(&buffer).into()
}


@@ -0,0 +1,148 @@
use core::marker::PhantomData;
use rand_chacha::ChaCha20Rng;
use winter_air::{
AuxRandElements, ConstraintCompositionCoefficients, PartitionOptions, ZkParameters,
};
use winter_crypto::{ElementHasher, SaltedMerkleTree};
use winter_math::{fields::f64::BaseElement, FieldElement};
use winter_prover::{
matrix::ColMatrix, CompositionPoly, CompositionPolyTrace, DefaultConstraintCommitment,
DefaultConstraintEvaluator, DefaultTraceLde, ProofOptions, Prover, StarkDomain, Trace,
TraceInfo, TracePolyTable, TraceTable,
};
use super::air::{PublicInputs, RescueAir, HASH_CYCLE_LEN};
use crate::{
hash::{rpo::Rpo256, STATE_WIDTH},
rand::RpoRandomCoin,
Word, ZERO,
};
// PROVER
// ================================================================================================
/// A prover for the RPO STARK-based signature scheme.
///
/// The signature is based on the one-wayness of the RPO hash function but it is generic over
/// the hash function used for instantiating the random oracle for the BCS transform.
pub(crate) struct RpoSignatureProver<H: ElementHasher + Sync> {
message: Word,
options: ProofOptions,
_hasher: PhantomData<H>,
}
impl<H: ElementHasher + Sync> RpoSignatureProver<H> {
pub(crate) fn new(message: Word, options: ProofOptions) -> Self {
Self { message, options, _hasher: PhantomData }
}
pub(crate) fn build_trace(&self, sk: Word) -> TraceTable<BaseElement> {
let mut trace = TraceTable::new(STATE_WIDTH, HASH_CYCLE_LEN);
trace.fill(
|state| {
// initialize first half of the rate portion of the state with the secret key
state[0] = ZERO;
state[1] = ZERO;
state[2] = ZERO;
state[3] = ZERO;
state[4] = sk[0];
state[5] = sk[1];
state[6] = sk[2];
state[7] = sk[3];
state[8] = ZERO;
state[9] = ZERO;
state[10] = ZERO;
state[11] = ZERO;
},
|step, state| {
Rpo256::apply_round(
state.try_into().expect("should not fail given the size of the array"),
step,
);
},
);
trace
}
}
impl<H: ElementHasher> Prover for RpoSignatureProver<H>
where
H: ElementHasher<BaseField = BaseElement> + Sync,
{
type BaseField = BaseElement;
type Air = RescueAir;
type Trace = TraceTable<BaseElement>;
type HashFn = Rpo256;
type VC = SaltedMerkleTree<Self::HashFn, Self::ZkPrng>;
type RandomCoin = RpoRandomCoin;
type TraceLde<E: FieldElement<BaseField = Self::BaseField>> =
DefaultTraceLde<E, Self::HashFn, Self::VC>;
type ConstraintCommitment<E: FieldElement<BaseField = Self::BaseField>> =
DefaultConstraintCommitment<E, Self::HashFn, Self::ZkPrng, Self::VC>;
type ConstraintEvaluator<'a, E: FieldElement<BaseField = Self::BaseField>> =
DefaultConstraintEvaluator<'a, Self::Air, E>;
type ZkPrng = ChaCha20Rng;
fn get_pub_inputs(&self, trace: &Self::Trace) -> PublicInputs {
let last_step = trace.length() - 1;
// Note that the message is not part of the execution trace but is part of the public
// inputs. This is explained in the reference description of the DSA; intuitively, it
// is done to make sure that the message is part of the Fiat-Shamir transcript, which
// binds the proof/signature to the message.
PublicInputs {
pub_key: [
trace.get(4, last_step),
trace.get(5, last_step),
trace.get(6, last_step),
trace.get(7, last_step),
],
msg: self.message,
}
}
fn options(&self) -> &ProofOptions {
&self.options
}
fn new_trace_lde<E: FieldElement<BaseField = Self::BaseField>>(
&self,
trace_info: &TraceInfo,
main_trace: &ColMatrix<Self::BaseField>,
domain: &StarkDomain<Self::BaseField>,
partition_option: PartitionOptions,
zk_parameters: Option<ZkParameters>,
prng: &mut Option<Self::ZkPrng>,
) -> (Self::TraceLde<E>, TracePolyTable<E>) {
DefaultTraceLde::new(trace_info, main_trace, domain, partition_option, zk_parameters, prng)
}
fn new_evaluator<'a, E: FieldElement<BaseField = Self::BaseField>>(
&self,
air: &'a Self::Air,
aux_rand_elements: Option<AuxRandElements<E>>,
composition_coefficients: ConstraintCompositionCoefficients<E>,
) -> Self::ConstraintEvaluator<'a, E> {
DefaultConstraintEvaluator::new(air, aux_rand_elements, composition_coefficients)
}
fn build_constraint_commitment<E: FieldElement<BaseField = Self::BaseField>>(
&self,
composition_poly_trace: CompositionPolyTrace<E>,
num_constraint_composition_columns: usize,
domain: &StarkDomain<Self::BaseField>,
partition_options: PartitionOptions,
zk_parameters: Option<ZkParameters>,
prng: &mut Option<Self::ZkPrng>,
) -> (Self::ConstraintCommitment<E>, CompositionPoly<E>) {
DefaultConstraintCommitment::new(
composition_poly_trace,
num_constraint_composition_columns,
domain,
partition_options,
zk_parameters,
prng,
)
}
}
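A hedged sketch of driving this prover end to end. It assumes the standard winterfell `Prover::prove()` entry point; the zk-enabled fork may additionally thread the `ChaCha20Rng` seeded by `generate_seed()` through the call:

// `opts` is the scheme's ProofOptions (FRI queries, blowup, grinding, etc.),
// constructed elsewhere; shown here only to illustrate the call sequence.
fn sign_sketch(opts: ProofOptions, message: Word, sk: Word) {
    let prover = RpoSignatureProver::<Rpo256>::new(message, opts);
    let trace = prover.build_trace(sk);
    let _proof = prover.prove(trace).expect("proving should succeed");
}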

View File

@@ -5,6 +5,7 @@ use super::{CubeExtension, Felt, FieldElement, StarkField, ZERO};
pub mod blake;
mod rescue;
pub(crate) use rescue::{ARK1, ARK2, DIGEST_SIZE, MDS, STATE_WIDTH};
pub mod rpo {
pub use super::rescue::{Rpo256, RpoDigest, RpoDigestError};
}

View File

@@ -6,7 +6,7 @@ mod arch;
pub use arch::optimized::{add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox};
mod mds;
use mds::{apply_mds, MDS};
pub(crate) use mds::{apply_mds, MDS};
mod rpo;
pub use rpo::{Rpo256, RpoDigest, RpoDigestError};
@@ -26,7 +26,7 @@ const NUM_ROUNDS: usize = 7;
/// Sponge state is set to 12 field elements or 96 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
const STATE_WIDTH: usize = 12;
pub(crate) const STATE_WIDTH: usize = 12;
/// The rate portion of the state is located in elements 4 through 11.
const RATE_RANGE: Range<usize> = 4..12;
@@ -42,8 +42,8 @@ const CAPACITY_RANGE: Range<usize> = 0..4;
///
/// The digest is returned from state elements 4, 5, 6, and 7 (the first four elements of the
/// rate portion).
const DIGEST_RANGE: Range<usize> = 4..8;
const DIGEST_SIZE: usize = DIGEST_RANGE.end - DIGEST_RANGE.start;
pub(crate) const DIGEST_RANGE: Range<usize> = 4..8;
pub(crate) const DIGEST_SIZE: usize = DIGEST_RANGE.end - DIGEST_RANGE.start;
/// The number of bytes needed to encode a digest
const DIGEST_BYTES: usize = 32;
@@ -144,7 +144,7 @@ fn add_constants(state: &mut [Felt; STATE_WIDTH], ark: &[Felt; STATE_WIDTH]) {
///
/// The constants are broken up into two arrays ARK1 and ARK2; ARK1 contains the constants for the
/// first half of RPO round, and ARK2 contains constants for the second half of RPO round.
const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
pub(crate) const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
[
Felt::new(5789762306288267392),
Felt::new(6522564764413701783),
@@ -245,7 +245,7 @@ const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
],
];
const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
pub(crate) const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
[
Felt::new(6077062762357204287),
Felt::new(15277620170502011191),

View File

@@ -1,6 +1,12 @@
use alloc::string::String;
use core::{cmp::Ordering, fmt::Display, ops::Deref, slice};
use rand::{
distributions::{Standard, Uniform},
prelude::Distribution,
};
use thiserror::Error;
use super::{Digest, Felt, StarkField, DIGEST_BYTES, DIGEST_SIZE, ZERO};
use crate::{
rand::Randomizable,
@@ -124,12 +130,27 @@ impl Randomizable for RpoDigest {
}
}
impl Distribution<RpoDigest> for Standard {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> RpoDigest {
let mut res = [ZERO; DIGEST_SIZE];
let uni_dist = Uniform::from(0..Felt::MODULUS);
for r in res.iter_mut() {
let sampled_integer = uni_dist.sample(rng);
*r = Felt::new(sampled_integer);
}
RpoDigest::new(res)
}
}
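With this impl in scope, uniformly random digests can be drawn from any `Rng`; a sketch:

use rand::{rngs::StdRng, Rng, SeedableRng};

let mut rng = StdRng::seed_from_u64(42);
// Each of the four field elements is sampled uniformly from [0, Felt::MODULUS).
let _digest: RpoDigest = rng.gen();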
// CONVERSIONS: FROM RPO DIGEST
// ================================================================================================
#[derive(Copy, Clone, Debug)]
#[derive(Debug, Error)]
pub enum RpoDigestError {
InvalidInteger,
#[error("failed to convert digest field element to {0}")]
TypeConversion(&'static str),
#[error("failed to convert to field element: {0}")]
InvalidFieldElement(String),
}
impl TryFrom<&RpoDigest> for [bool; DIGEST_SIZE] {
@@ -153,10 +174,10 @@ impl TryFrom<RpoDigest> for [bool; DIGEST_SIZE] {
}
Ok([
to_bool(value.0[0].as_int()).ok_or(RpoDigestError::InvalidInteger)?,
to_bool(value.0[1].as_int()).ok_or(RpoDigestError::InvalidInteger)?,
to_bool(value.0[2].as_int()).ok_or(RpoDigestError::InvalidInteger)?,
to_bool(value.0[3].as_int()).ok_or(RpoDigestError::InvalidInteger)?,
to_bool(value.0[0].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
to_bool(value.0[1].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
to_bool(value.0[2].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
to_bool(value.0[3].as_int()).ok_or(RpoDigestError::TypeConversion("bool"))?,
])
}
}
@@ -174,10 +195,22 @@ impl TryFrom<RpoDigest> for [u8; DIGEST_SIZE] {
fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u8"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u8"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u8"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u8"))?,
])
}
}
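A sketch of the new error variant in action: any element that does not fit the target type reports which conversion failed.

let digest = RpoDigest::new([Felt::new(256), ZERO, ZERO, ZERO]);
let res: Result<[u8; DIGEST_SIZE], _> = digest.try_into();
// 256 does not fit in a u8, so the error names the offending target type.
assert!(matches!(res, Err(RpoDigestError::TypeConversion("u8"))));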
@@ -195,10 +228,22 @@ impl TryFrom<RpoDigest> for [u16; DIGEST_SIZE] {
fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u16"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u16"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u16"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u16"))?,
])
}
}
@@ -216,10 +261,22 @@ impl TryFrom<RpoDigest> for [u32; DIGEST_SIZE] {
fn try_from(value: RpoDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u32"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u32"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u32"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpoDigestError::TypeConversion("u32"))?,
])
}
}
@@ -343,10 +400,10 @@ impl TryFrom<[u64; DIGEST_SIZE]> for RpoDigest {
fn try_from(value: [u64; DIGEST_SIZE]) -> Result<Self, RpoDigestError> {
Ok(Self([
value[0].try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value[1].try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value[2].try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value[3].try_into().map_err(|_| RpoDigestError::InvalidInteger)?,
value[0].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
value[1].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
value[2].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
value[3].try_into().map_err(RpoDigestError::InvalidFieldElement)?,
]))
}
}

View File

@@ -1,6 +1,8 @@
use alloc::string::String;
use core::{cmp::Ordering, fmt::Display, ops::Deref, slice};
use thiserror::Error;
use super::{Digest, Felt, StarkField, DIGEST_BYTES, DIGEST_SIZE, ZERO};
use crate::{
rand::Randomizable,
@@ -127,9 +129,12 @@ impl Randomizable for RpxDigest {
// CONVERSIONS: FROM RPX DIGEST
// ================================================================================================
#[derive(Copy, Clone, Debug)]
#[derive(Debug, Error)]
pub enum RpxDigestError {
InvalidInteger,
#[error("failed to convert digest field element to {0}")]
TypeConversion(&'static str),
#[error("failed to convert to field element: {0}")]
InvalidFieldElement(String),
}
impl TryFrom<&RpxDigest> for [bool; DIGEST_SIZE] {
@@ -153,10 +158,10 @@ impl TryFrom<RpxDigest> for [bool; DIGEST_SIZE] {
}
Ok([
to_bool(value.0[0].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
to_bool(value.0[1].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
to_bool(value.0[2].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
to_bool(value.0[3].as_int()).ok_or(RpxDigestError::InvalidInteger)?,
to_bool(value.0[0].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
to_bool(value.0[1].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
to_bool(value.0[2].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
to_bool(value.0[3].as_int()).ok_or(RpxDigestError::TypeConversion("bool"))?,
])
}
}
@@ -174,10 +179,22 @@ impl TryFrom<RpxDigest> for [u8; DIGEST_SIZE] {
fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u8"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u8"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u8"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u8"))?,
])
}
}
@@ -195,10 +212,22 @@ impl TryFrom<RpxDigest> for [u16; DIGEST_SIZE] {
fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u16"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u16"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u16"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u16"))?,
])
}
}
@@ -216,10 +245,22 @@ impl TryFrom<RpxDigest> for [u32; DIGEST_SIZE] {
fn try_from(value: RpxDigest) -> Result<Self, Self::Error> {
Ok([
value.0[0].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[1].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[2].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[3].as_int().try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value.0[0]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u32"))?,
value.0[1]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u32"))?,
value.0[2]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u32"))?,
value.0[3]
.as_int()
.try_into()
.map_err(|_| RpxDigestError::TypeConversion("u32"))?,
])
}
}
@@ -343,10 +384,10 @@ impl TryFrom<[u64; DIGEST_SIZE]> for RpxDigest {
fn try_from(value: [u64; DIGEST_SIZE]) -> Result<Self, RpxDigestError> {
Ok(Self([
value[0].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value[1].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value[2].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value[3].try_into().map_err(|_| RpxDigestError::InvalidInteger)?,
value[0].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
value[1].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
value[2].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
value[3].try_into().map_err(RpxDigestError::InvalidFieldElement)?,
]))
}
}

View File

@@ -1,65 +1,34 @@
use alloc::vec::Vec;
use core::fmt;
use thiserror::Error;
use super::{smt::SmtLeafError, MerklePath, NodeIndex, RpoDigest};
use super::{NodeIndex, RpoDigest};
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Debug, Error)]
pub enum MerkleError {
ConflictingRoots(Vec<RpoDigest>),
#[error("expected merkle root {expected_root} found {actual_root}")]
ConflictingRoots {
expected_root: RpoDigest,
actual_root: RpoDigest,
},
#[error("provided merkle tree depth {0} is too small")]
DepthTooSmall(u8),
#[error("provided merkle tree depth {0} is too big")]
DepthTooBig(u64),
#[error("multiple values provided for merkle tree index {0}")]
DuplicateValuesForIndex(u64),
DuplicateValuesForKey(RpoDigest),
InvalidIndex { depth: u8, value: u64 },
InvalidDepth { expected: u8, provided: u8 },
InvalidSubtreeDepth { subtree_depth: u8, tree_depth: u8 },
InvalidPath(MerklePath),
InvalidNumEntries(usize),
NodeNotInSet(NodeIndex),
NodeNotInStore(RpoDigest, NodeIndex),
#[error("node index value {value} is not valid for depth {depth}")]
InvalidNodeIndex { depth: u8, value: u64 },
#[error("provided node index depth {provided} does not match expected depth {expected}")]
InvalidNodeIndexDepth { expected: u8, provided: u8 },
#[error("merkle subtree depth {subtree_depth} exceeds merkle tree depth {tree_depth}")]
SubtreeDepthExceedsDepth { subtree_depth: u8, tree_depth: u8 },
#[error("number of entries in the merkle tree exceeds the maximum of {0}")]
TooManyEntries(usize),
#[error("node index `{0}` not found in the tree")]
NodeIndexNotFoundInTree(NodeIndex),
#[error("node {0:?} with index `{1}` not found in the store")]
NodeIndexNotFoundInStore(RpoDigest, NodeIndex),
#[error("number of provided merkle tree leaves {0} is not a power of two")]
NumLeavesNotPowerOfTwo(usize),
#[error("root {0:?} is not in the store")]
RootNotInStore(RpoDigest),
SmtLeaf(SmtLeafError),
}
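With `thiserror`, the hand-written `Display` impl below (now deleted) is generated from the `#[error]` attributes; a sketch:

let err = MerkleError::DepthTooSmall(0);
assert_eq!(err.to_string(), "provided merkle tree depth 0 is too small");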
impl fmt::Display for MerkleError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use MerkleError::*;
match self {
ConflictingRoots(roots) => write!(f, "the merkle paths roots do not match {roots:?}"),
DepthTooSmall(depth) => write!(f, "the provided depth {depth} is too small"),
DepthTooBig(depth) => write!(f, "the provided depth {depth} is too big"),
DuplicateValuesForIndex(key) => write!(f, "multiple values provided for key {key}"),
DuplicateValuesForKey(key) => write!(f, "multiple values provided for key {key}"),
InvalidIndex { depth, value } => {
write!(f, "the index value {value} is not valid for the depth {depth}")
},
InvalidDepth { expected, provided } => {
write!(f, "the provided depth {provided} is not valid for {expected}")
},
InvalidSubtreeDepth { subtree_depth, tree_depth } => {
write!(f, "tried inserting a subtree of depth {subtree_depth} into a tree of depth {tree_depth}")
},
InvalidPath(_path) => write!(f, "the provided path is not valid"),
InvalidNumEntries(max) => write!(f, "number of entries exceeded the maximum: {max}"),
NodeNotInSet(index) => write!(f, "the node with index ({index}) is not in the set"),
NodeNotInStore(hash, index) => {
write!(f, "the node {hash:?} with index ({index}) is not in the store")
},
NumLeavesNotPowerOfTwo(leaves) => {
write!(f, "the leaves count {leaves} is not a power of 2")
},
RootNotInStore(root) => write!(f, "the root {:?} is not in the store", root),
SmtLeaf(smt_leaf_error) => write!(f, "smt leaf error: {smt_leaf_error}"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for MerkleError {}
impl From<SmtLeafError> for MerkleError {
fn from(value: SmtLeafError) -> Self {
Self::SmtLeaf(value)
}
}

View File

@@ -38,7 +38,7 @@ impl NodeIndex {
/// Returns an error if the `value` is greater than or equal to 2^{depth}.
pub const fn new(depth: u8, value: u64) -> Result<Self, MerkleError> {
if (64 - value.leading_zeros()) > depth as u32 {
Err(MerkleError::InvalidIndex { depth, value })
Err(MerkleError::InvalidNodeIndex { depth, value })
} else {
Ok(Self { depth, value })
}
@@ -97,6 +97,14 @@ impl NodeIndex {
self
}
/// Returns the parent of the current node. This is the same as [`Self::move_up()`], but returns
/// a new value instead of mutating `self`.
pub const fn parent(mut self) -> Self {
self.depth = self.depth.saturating_sub(1);
self.value >>= 1;
self
}
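A usage sketch of the new helper:

let idx = NodeIndex::new(3, 5).unwrap();
// Moving up one level halves the horizontal position: (depth 3, value 5) -> (depth 2, value 2).
assert_eq!(idx.parent(), NodeIndex::new(2, 2).unwrap());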
// PROVIDERS
// --------------------------------------------------------------------------------------------
@@ -182,6 +190,7 @@ impl Deserializable for NodeIndex {
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use proptest::prelude::*;
use super::*;
@@ -190,19 +199,19 @@ mod tests {
fn test_node_index_value_too_high() {
assert_eq!(NodeIndex::new(0, 0).unwrap(), NodeIndex { depth: 0, value: 0 });
let err = NodeIndex::new(0, 1).unwrap_err();
assert_eq!(err, MerkleError::InvalidIndex { depth: 0, value: 1 });
assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 0, value: 1 });
assert_eq!(NodeIndex::new(1, 1).unwrap(), NodeIndex { depth: 1, value: 1 });
let err = NodeIndex::new(1, 2).unwrap_err();
assert_eq!(err, MerkleError::InvalidIndex { depth: 1, value: 2 });
assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 1, value: 2 });
assert_eq!(NodeIndex::new(2, 3).unwrap(), NodeIndex { depth: 2, value: 3 });
let err = NodeIndex::new(2, 4).unwrap_err();
assert_eq!(err, MerkleError::InvalidIndex { depth: 2, value: 4 });
assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 2, value: 4 });
assert_eq!(NodeIndex::new(3, 7).unwrap(), NodeIndex { depth: 3, value: 7 });
let err = NodeIndex::new(3, 8).unwrap_err();
assert_eq!(err, MerkleError::InvalidIndex { depth: 3, value: 8 });
assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 3, value: 8 });
}
#[test]

View File

@@ -1,41 +1,27 @@
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
use alloc::string::String;
use thiserror::Error;
use crate::merkle::MerkleError;
#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, Error)]
pub enum MmrError {
InvalidPosition(usize),
InvalidPeaks,
InvalidPeak,
PeakOutOfBounds(usize, usize),
#[error("mmr does not contain position {0}")]
PositionNotFound(usize),
#[error("mmr peaks are invalid: {0}")]
InvalidPeaks(String),
#[error(
"mmr peak does not match the computed merkle root of the provided authentication path"
)]
PeakPathMismatch,
#[error("requested peak index is {peak_idx} but the number of peaks is {peaks_len}")]
PeakOutOfBounds { peak_idx: usize, peaks_len: usize },
#[error("invalid mmr update")]
InvalidUpdate,
UnknownPeak,
MerkleError(MerkleError),
#[error("mmr does not contain a peak with depth {0}")]
UnknownPeak(u8),
#[error("invalid merkle path")]
InvalidMerklePath(#[source] MerkleError),
#[error("merkle root computation failed")]
MerkleRootComputationFailed(#[source] MerkleError),
}
impl Display for MmrError {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
MmrError::InvalidPosition(pos) => write!(fmt, "Mmr does not contain position {pos}"),
MmrError::InvalidPeaks => write!(fmt, "Invalid peaks count"),
MmrError::InvalidPeak => {
write!(fmt, "Peak values does not match merkle path computed root")
},
MmrError::PeakOutOfBounds(peak_idx, peaks_len) => write!(
fmt,
"Requested peak index is {} but the number of peaks is {}",
peak_idx, peaks_len
),
MmrError::InvalidUpdate => write!(fmt, "Invalid Mmr update"),
MmrError::UnknownPeak => {
write!(fmt, "Peak not in Mmr")
},
MmrError::MerkleError(err) => write!(fmt, "{}", err),
}
}
}
#[cfg(feature = "std")]
impl Error for MmrError {}

View File

@@ -99,7 +99,7 @@ impl Mmr {
pub fn open_at(&self, pos: usize, forest: usize) -> Result<MmrProof, MmrError> {
// find the target tree responsible for the MMR position
let tree_bit =
leaf_to_corresponding_tree(pos, forest).ok_or(MmrError::InvalidPosition(pos))?;
leaf_to_corresponding_tree(pos, forest).ok_or(MmrError::PositionNotFound(pos))?;
// isolate the trees before the target
let forest_before = forest & high_bitmask(tree_bit + 1);
@@ -126,7 +126,7 @@ impl Mmr {
pub fn get(&self, pos: usize) -> Result<RpoDigest, MmrError> {
// find the target tree responsible for the MMR position
let tree_bit =
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::InvalidPosition(pos))?;
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::PositionNotFound(pos))?;
// isolate the trees before the target
let forest_before = self.forest & high_bitmask(tree_bit + 1);
@@ -174,7 +174,10 @@ impl Mmr {
/// Returns an error if the specified `forest` value is not valid for this MMR.
pub fn peaks_at(&self, forest: usize) -> Result<MmrPeaks, MmrError> {
if forest > self.forest {
return Err(MmrError::InvalidPeaks);
return Err(MmrError::InvalidPeaks(format!(
"requested forest {forest} exceeds current forest {}",
self.forest
)));
}
let peaks: Vec<RpoDigest> = TrueBitPositionIterator::new(forest)
@@ -199,7 +202,7 @@ impl Mmr {
/// that have been merged together, followed by the new peaks of the [Mmr].
pub fn get_delta(&self, from_forest: usize, to_forest: usize) -> Result<MmrDelta, MmrError> {
if to_forest > self.forest || from_forest > to_forest {
return Err(MmrError::InvalidPeaks);
return Err(MmrError::InvalidPeaks(format!("to_forest {to_forest} exceeds the current forest {} or from_forest {from_forest} exceeds to_forest", self.forest)));
}
if from_forest == to_forest {

View File

@@ -145,7 +145,7 @@ impl PartialMmr {
/// in the underlying MMR.
pub fn open(&self, pos: usize) -> Result<Option<MmrProof>, MmrError> {
let tree_bit =
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::InvalidPosition(pos))?;
leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::PositionNotFound(pos))?;
let depth = tree_bit as usize;
let mut nodes = Vec::with_capacity(depth);
@@ -298,7 +298,7 @@ impl PartialMmr {
// invalid.
let tree = 1 << path.depth();
if tree & self.forest == 0 {
return Err(MmrError::UnknownPeak);
return Err(MmrError::UnknownPeak(path.depth()));
};
if leaf_pos + 1 == self.forest
@@ -319,9 +319,11 @@ impl PartialMmr {
// Compute the root of the authentication path, and check it matches the current version of
// the PartialMmr.
let computed = path.compute_root(path_idx as u64, leaf).map_err(MmrError::MerkleError)?;
let computed = path
.compute_root(path_idx as u64, leaf)
.map_err(MmrError::MerkleRootComputationFailed)?;
if self.peaks[peak_pos] != computed {
return Err(MmrError::InvalidPeak);
return Err(MmrError::PeakPathMismatch);
}
let mut idx = InOrderIndex::from_leaf_pos(leaf_pos);
@@ -356,7 +358,10 @@ impl PartialMmr {
/// inserted into the partial MMR.
pub fn apply(&mut self, delta: MmrDelta) -> Result<Vec<(InOrderIndex, RpoDigest)>, MmrError> {
if delta.forest < self.forest {
return Err(MmrError::InvalidPeaks);
return Err(MmrError::InvalidPeaks(format!(
"forest of mmr delta {} is less than current forest {}",
delta.forest, self.forest
)));
}
let mut inserted_nodes = Vec::new();

View File

@@ -45,7 +45,11 @@ impl MmrPeaks {
/// Returns an error if the number of leaves and the number of peaks are inconsistent.
pub fn new(num_leaves: usize, peaks: Vec<RpoDigest>) -> Result<Self, MmrError> {
if num_leaves.count_ones() as usize != peaks.len() {
return Err(MmrError::InvalidPeaks);
return Err(MmrError::InvalidPeaks(format!(
"number of one bits in leaves is {} which does not equal peak length {}",
num_leaves.count_ones(),
peaks.len()
)));
}
Ok(Self { num_leaves, peaks })
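A sketch of the invariant being checked: the number of peaks must equal the number of one bits in `num_leaves`.

// 5 leaves = 0b101 -> exactly two peaks (a 4-leaf tree and a 1-leaf tree).
assert!(MmrPeaks::new(5, vec![RpoDigest::default(); 2]).is_ok());
assert!(MmrPeaks::new(5, vec![RpoDigest::default(); 1]).is_err());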
@@ -77,7 +81,7 @@ impl MmrPeaks {
pub fn get_peak(&self, peak_idx: usize) -> Result<&RpoDigest, MmrError> {
self.peaks
.get(peak_idx)
.ok_or(MmrError::PeakOutOfBounds(peak_idx, self.peaks.len()))
.ok_or(MmrError::PeakOutOfBounds { peak_idx, peaks_len: self.peaks.len() })
}
/// Converts this [MmrPeaks] into its components: number of leaves and a vector of peaks of
@@ -106,7 +110,7 @@ impl MmrPeaks {
opening
.merkle_path
.verify(opening.relative_pos() as u64, value, root)
.map_err(MmrError::MerkleError)
.map_err(MmrError::InvalidMerklePath)
}
/// Flattens and pads the peaks to make hashing inside of the Miden VM easier.

View File

@@ -21,9 +21,11 @@ mod path;
pub use path::{MerklePath, RootPath, ValuePath};
mod smt;
#[cfg(feature = "internal")]
pub use smt::build_subtree_for_bench;
pub use smt::{
LeafIndex, MutationSet, SimpleSmt, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError,
SMT_DEPTH, SMT_MAX_DEPTH, SMT_MIN_DEPTH,
SubtreeLeaf, SMT_DEPTH, SMT_MAX_DEPTH, SMT_MIN_DEPTH,
};
mod mmr;

View File

@@ -116,7 +116,7 @@ impl PartialMerkleTree {
// depth of 63 because we consider passing in a vector of size 2^64 infeasible.
let max = 2usize.pow(63);
if layers.len() > max {
return Err(MerkleError::InvalidNumEntries(max));
return Err(MerkleError::TooManyEntries(max));
}
// Get maximum depth
@@ -147,11 +147,12 @@ impl PartialMerkleTree {
let index = NodeIndex::new(depth, index_value)?;
// get hash of the current node
let node = nodes.get(&index).ok_or(MerkleError::NodeNotInSet(index))?;
let node =
nodes.get(&index).ok_or(MerkleError::NodeIndexNotFoundInTree(index))?;
// get hash of the sibling node
let sibling = nodes
.get(&index.sibling())
.ok_or(MerkleError::NodeNotInSet(index.sibling()))?;
.ok_or(MerkleError::NodeIndexNotFoundInTree(index.sibling()))?;
// get parent hash
let parent = Rpo256::merge(&index.build_node(*node, *sibling));
@@ -184,7 +185,10 @@ impl PartialMerkleTree {
/// # Errors
/// Returns an error if the specified NodeIndex is not contained in the nodes map.
pub fn get_node(&self, index: NodeIndex) -> Result<RpoDigest, MerkleError> {
self.nodes.get(&index).ok_or(MerkleError::NodeNotInSet(index)).copied()
self.nodes
.get(&index)
.ok_or(MerkleError::NodeIndexNotFoundInTree(index))
.copied()
}
/// Returns true if provided index contains in the leaves set, false otherwise.
@@ -224,7 +228,7 @@ impl PartialMerkleTree {
}
if !self.nodes.contains_key(&index) {
return Err(MerkleError::NodeNotInSet(index));
return Err(MerkleError::NodeIndexNotFoundInTree(index));
}
let mut path = Vec::new();
@@ -335,15 +339,16 @@ impl PartialMerkleTree {
if self.root() == EMPTY_DIGEST {
self.nodes.insert(ROOT_INDEX, root);
} else if self.root() != root {
return Err(MerkleError::ConflictingRoots([self.root(), root].to_vec()));
return Err(MerkleError::ConflictingRoots {
expected_root: self.root(),
actual_root: root,
});
}
Ok(())
}
/// Updates the value of the leaf at the specified index, returning the old leaf value.
/// By default the specified index is assumed to belong to the deepest layer. If the considered
/// node does not belong to the tree, the first node on the way to the root will be changed.
///
/// By default the specified index is assumed to belong to the deepest layer. If the considered
/// node does not belong to the tree, the first node on the way to the root will be changed.
@@ -352,6 +357,7 @@ impl PartialMerkleTree {
///
/// # Errors
/// Returns an error if:
/// - No entry exists at the specified index.
/// - The specified index is greater than the maximum number of nodes on the deepest layer.
pub fn update_leaf(&mut self, index: u64, value: Word) -> Result<RpoDigest, MerkleError> {
let mut node_index = NodeIndex::new(self.max_depth(), index)?;
@@ -367,7 +373,7 @@ impl PartialMerkleTree {
let old_value = self
.nodes
.insert(node_index, value.into())
.ok_or(MerkleError::NodeNotInSet(node_index))?;
.ok_or(MerkleError::NodeIndexNotFoundInTree(node_index))?;
// if the old value and new value are the same, there is nothing to update
if value == *old_value {

View File

@@ -61,7 +61,10 @@ impl MerklePath {
pub fn verify(&self, index: u64, node: RpoDigest, root: &RpoDigest) -> Result<(), MerkleError> {
let computed_root = self.compute_root(index, node)?;
if &computed_root != root {
return Err(MerkleError::ConflictingRoots(vec![computed_root, *root]));
return Err(MerkleError::ConflictingRoots {
expected_root: *root,
actual_root: computed_root,
});
}
Ok(())

View File

@@ -1,86 +1,39 @@
use alloc::vec::Vec;
use core::fmt;
use thiserror::Error;
use crate::{
hash::rpo::RpoDigest,
merkle::{LeafIndex, SMT_DEPTH},
Word,
};
// SMT LEAF ERROR
// =================================================================================================
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Debug, Error)]
pub enum SmtLeafError {
InconsistentKeys {
entries: Vec<(RpoDigest, Word)>,
key_1: RpoDigest,
key_2: RpoDigest,
},
InvalidNumEntriesForMultiple(usize),
SingleKeyInconsistentWithLeafIndex {
#[error(
"multiple leaf requires all keys to map to the same leaf index but key1 {key_1} and key2 {key_2} map to different indices"
)]
InconsistentMultipleLeafKeys { key_1: RpoDigest, key_2: RpoDigest },
#[error("single leaf key {key} maps to {actual_leaf_index:?} but was expected to map to {expected_leaf_index:?}")]
InconsistentSingleLeafIndices {
key: RpoDigest,
leaf_index: LeafIndex<SMT_DEPTH>,
expected_leaf_index: LeafIndex<SMT_DEPTH>,
actual_leaf_index: LeafIndex<SMT_DEPTH>,
},
MultipleKeysInconsistentWithLeafIndex {
#[error("supplied leaf index {leaf_index_supplied:?} does not match {leaf_index_from_keys:?} for multiple leaf")]
InconsistentMultipleLeafIndices {
leaf_index_from_keys: LeafIndex<SMT_DEPTH>,
leaf_index_supplied: LeafIndex<SMT_DEPTH>,
},
}
#[cfg(feature = "std")]
impl std::error::Error for SmtLeafError {}
impl fmt::Display for SmtLeafError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use SmtLeafError::*;
match self {
InvalidNumEntriesForMultiple(num_entries) => {
write!(f, "Multiple leaf requires 2 or more entries. Got: {num_entries}")
},
InconsistentKeys { entries, key_1, key_2 } => {
write!(f, "Multiple leaf requires all keys to map to the same leaf index. Offending keys: {key_1} and {key_2}. Entries: {entries:?}.")
},
SingleKeyInconsistentWithLeafIndex { key, leaf_index } => {
write!(
f,
"Single key in leaf inconsistent with leaf index. Key: {key}, leaf index: {}",
leaf_index.value()
)
},
MultipleKeysInconsistentWithLeafIndex {
leaf_index_from_keys,
leaf_index_supplied,
} => {
write!(
f,
"Keys in entries map to leaf index {}, but leaf index {} was supplied",
leaf_index_from_keys.value(),
leaf_index_supplied.value()
)
},
}
}
#[error("multiple leaf requires at least two entries but only {0} were given")]
MultipleLeafRequiresTwoEntries(usize),
}
// SMT PROOF ERROR
// =================================================================================================
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Debug, Error)]
pub enum SmtProofError {
InvalidPathLength(usize),
}
#[cfg(feature = "std")]
impl std::error::Error for SmtProofError {}
impl fmt::Display for SmtProofError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use SmtProofError::*;
match self {
InvalidPathLength(path_length) => {
write!(f, "Invalid Merkle path length. Expected {SMT_DEPTH}, got {path_length}")
},
}
}
#[error("merkle path length {0} does not match SMT depth {SMT_DEPTH}")]
InvalidMerklePathLength(usize),
}

View File

@@ -31,10 +31,12 @@ impl SmtLeaf {
1 => {
let (key, value) = entries[0];
if LeafIndex::<SMT_DEPTH>::from(key) != leaf_index {
return Err(SmtLeafError::SingleKeyInconsistentWithLeafIndex {
let computed_index = LeafIndex::<SMT_DEPTH>::from(key);
if computed_index != leaf_index {
return Err(SmtLeafError::InconsistentSingleLeafIndices {
key,
leaf_index,
expected_leaf_index: leaf_index,
actual_leaf_index: computed_index,
});
}
@@ -46,7 +48,7 @@ impl SmtLeaf {
// `new_multiple()` checked that all keys map to the same leaf index. We still need
// to ensure that this shared leaf index is `leaf_index`.
if leaf.index() != leaf_index {
Err(SmtLeafError::MultipleKeysInconsistentWithLeafIndex {
Err(SmtLeafError::InconsistentMultipleLeafIndices {
leaf_index_from_keys: leaf.index(),
leaf_index_supplied: leaf_index,
})
@@ -75,7 +77,7 @@ impl SmtLeaf {
/// - Returns an error if 2 keys in `entries` map to a different leaf index
pub fn new_multiple(entries: Vec<(RpoDigest, Word)>) -> Result<Self, SmtLeafError> {
if entries.len() < 2 {
return Err(SmtLeafError::InvalidNumEntriesForMultiple(entries.len()));
return Err(SmtLeafError::MultipleLeafRequiresTwoEntries(entries.len()));
}
// Check that all keys map to the same leaf index
@@ -89,8 +91,7 @@ impl SmtLeaf {
let next_leaf_index: LeafIndex<SMT_DEPTH> = next_key.into();
if next_leaf_index != first_leaf_index {
return Err(SmtLeafError::InconsistentKeys {
entries,
return Err(SmtLeafError::InconsistentMultipleLeafKeys {
key_1: first_key,
key_2: next_key,
});

View File

@@ -71,12 +71,51 @@ impl Smt {
/// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
///
/// If the `concurrent` feature is enabled, this function uses a parallel implementation to
/// process the entries efficiently, otherwise it defaults to the sequential implementation.
///
/// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
///
/// # Errors
/// Returns an error if the provided entries contain multiple values for the same key.
pub fn with_entries(
entries: impl IntoIterator<Item = (RpoDigest, Word)>,
) -> Result<Self, MerkleError> {
#[cfg(feature = "concurrent")]
{
let mut seen_keys = BTreeSet::new();
let entries: Vec<_> = entries
.into_iter()
.map(|(key, value)| {
if seen_keys.insert(key) {
Ok((key, value))
} else {
Err(MerkleError::DuplicateValuesForIndex(
LeafIndex::<SMT_DEPTH>::from(key).value(),
))
}
})
.collect::<Result<_, _>>()?;
if entries.is_empty() {
return Ok(Self::default());
}
<Self as SparseMerkleTree<SMT_DEPTH>>::with_entries_par(entries)
}
#[cfg(not(feature = "concurrent"))]
{
Self::with_entries_sequential(entries)
}
}
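Both paths produce identical trees; only construction time differs. A sketch:

let entries = vec![
    (RpoDigest::new([ONE, ONE, ONE, Felt::new(1)]), [ONE; 4]),
    (RpoDigest::new([ONE, ONE, ONE, Felt::new(2)]), [ONE; 4]),
];
let parallel = Smt::with_entries(entries.clone()).unwrap();
let sequential = Smt::with_entries_sequential(entries).unwrap();
assert_eq!(parallel.root(), sequential.root());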
/// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
///
/// This sequential implementation processes entries one at a time to build the tree.
/// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
///
/// # Errors
/// Returns an error if the provided entries contain multiple values for the same key.
pub fn with_entries_sequential(
entries: impl IntoIterator<Item = (RpoDigest, Word)>,
) -> Result<Self, MerkleError> {
// create an empty tree
let mut tree = Self::new();
@@ -101,6 +140,23 @@ impl Smt {
Ok(tree)
}
/// Returns a new [`Smt`] instantiated from already computed leaves and nodes.
///
/// This function performs minimal consistency checking. It is the caller's responsibility to
/// ensure the passed arguments are correct and consistent with each other.
///
/// # Panics
/// With debug assertions on, this function panics if `root` does not match the root node in
/// `inner_nodes`.
pub fn from_raw_parts(
inner_nodes: BTreeMap<NodeIndex, InnerNode>,
leaves: BTreeMap<u64, SmtLeaf>,
root: RpoDigest,
) -> Self {
// Our particular implementation of `from_raw_parts()` never returns `Err`.
<Self as SparseMerkleTree<SMT_DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
@@ -260,6 +316,19 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
const EMPTY_VALUE: Self::Value = EMPTY_WORD;
const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
fn from_raw_parts(
inner_nodes: BTreeMap<NodeIndex, InnerNode>,
leaves: BTreeMap<u64, SmtLeaf>,
root: RpoDigest,
) -> Result<Self, MerkleError> {
if cfg!(debug_assertions) {
let root_node = inner_nodes.get(&NodeIndex::root()).unwrap();
assert_eq!(root_node.hash(), root);
}
Ok(Self { root, inner_nodes, leaves })
}
fn root(&self) -> RpoDigest {
self.root
}
@@ -344,6 +413,23 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
fn path_and_leaf_to_opening(path: MerklePath, leaf: SmtLeaf) -> SmtProof {
SmtProof::new_unchecked(path, leaf)
}
fn pairs_to_leaf(mut pairs: Vec<(RpoDigest, Word)>) -> SmtLeaf {
assert!(!pairs.is_empty());
if pairs.len() > 1 {
SmtLeaf::new_multiple(pairs).unwrap()
} else {
let (key, value) = pairs.pop().unwrap();
// TODO: should we ever be constructing empty leaves from pairs?
if value == Self::EMPTY_VALUE {
let index = Self::key_to_leaf_index(&key);
SmtLeaf::new_empty(index)
} else {
SmtLeaf::new_single(key, value)
}
}
}
}
impl Default for Smt {

View File

@@ -25,7 +25,7 @@ impl SmtProof {
pub fn new(path: MerklePath, leaf: SmtLeaf) -> Result<Self, SmtProofError> {
let depth: usize = SMT_DEPTH.into();
if path.len() != depth {
return Err(SmtProofError::InvalidPathLength(path.len()));
return Err(SmtProofError::InvalidMerklePathLength(path.len()));
}
Ok(Self { path, leaf })

View File

@@ -1,4 +1,7 @@
use alloc::{collections::BTreeMap, vec::Vec};
use core::mem;
use num::Integer;
use super::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, NodeIndex};
use crate::{
@@ -62,6 +65,17 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
// PROVIDED METHODS
// ---------------------------------------------------------------------------------------------
/// Creates a new sparse Merkle tree from an existing set of key-value pairs, in parallel.
#[cfg(feature = "concurrent")]
fn with_entries_par(entries: Vec<(Self::Key, Self::Value)>) -> Result<Self, MerkleError>
where
Self: Sized,
{
let (inner_nodes, leaves) = Self::build_subtrees(entries);
let root = inner_nodes.get(&NodeIndex::root()).unwrap().hash();
Self::from_raw_parts(inner_nodes, leaves, root)
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
/// path to the leaf, as well as the leaf itself.
fn open(&self, key: &Self::Key) -> Self::Opening {
@@ -267,7 +281,10 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
// Guard against accidentally trying to apply mutations that were computed against a
// different tree, including a stale version of this tree.
if old_root != self.root() {
return Err(MerkleError::ConflictingRoots(vec![old_root, self.root()]));
return Err(MerkleError::ConflictingRoots {
expected_root: self.root(),
actual_root: old_root,
});
}
for (index, mutation) in node_mutations {
@@ -289,6 +306,16 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
// REQUIRED METHODS
// ---------------------------------------------------------------------------------------------
/// Construct this type from already computed leaves and nodes. The caller ensures passed
/// arguments are correct and consistent with each other.
fn from_raw_parts(
inner_nodes: BTreeMap<NodeIndex, InnerNode>,
leaves: BTreeMap<u64, Self::Leaf>,
root: RpoDigest,
) -> Result<Self, MerkleError>
where
Self: Sized;
/// The root of the tree
fn root(&self) -> RpoDigest;
@@ -338,18 +365,137 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
/// Maps a key to a leaf index
fn key_to_leaf_index(key: &Self::Key) -> LeafIndex<DEPTH>;
/// Constructs a single leaf from an arbitrary amount of key-value pairs.
/// Those pairs must all have the same leaf index.
fn pairs_to_leaf(pairs: Vec<(Self::Key, Self::Value)>) -> Self::Leaf;
/// Maps a (MerklePath, Self::Leaf) to an opening.
///
/// The length of `path` is guaranteed to be equal to `DEPTH`
fn path_and_leaf_to_opening(path: MerklePath, leaf: Self::Leaf) -> Self::Opening;
/// Performs the initial transforms for constructing a [`SparseMerkleTree`] by composing
/// subtrees. In other words, this function takes the key-value inputs to the tree, and produces
/// the inputs to feed into [`build_subtree()`].
///
/// `pairs` *must* already be sorted **by leaf index column**, not simply sorted by key. If
/// `pairs` is not correctly sorted, the returned computations will be incorrect.
///
/// # Panics
/// With debug assertions on, this function panics if it detects that `pairs` is not correctly
/// sorted. Without debug assertions, the returned computations will be incorrect.
fn sorted_pairs_to_leaves(
pairs: Vec<(Self::Key, Self::Value)>,
) -> PairComputations<u64, Self::Leaf> {
debug_assert!(pairs.is_sorted_by_key(|(key, _)| Self::key_to_leaf_index(key).value()));
let mut accumulator: PairComputations<u64, Self::Leaf> = Default::default();
let mut accumulated_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(pairs.len() / 2);
// As we iterate, we'll keep track of the kv-pairs we've seen so far that correspond to a
// single leaf. When we see a pair that's in a different leaf, we'll swap these pairs
// out and store them in our accumulated leaves.
let mut current_leaf_buffer: Vec<(Self::Key, Self::Value)> = Default::default();
let mut iter = pairs.into_iter().peekable();
while let Some((key, value)) = iter.next() {
let col = Self::key_to_leaf_index(&key).index.value();
let peeked_col = iter.peek().map(|(key, _v)| {
let index = Self::key_to_leaf_index(key);
let next_col = index.index.value();
// In debug builds, panic if `pairs` is not sorted by column.
debug_assert!(next_col >= col);
next_col
});
current_leaf_buffer.push((key, value));
// If the next pair is the same column as this one, then we're done after adding this
// pair to the buffer.
if peeked_col == Some(col) {
continue;
}
// Otherwise, the next pair is a different column, or there is no next pair. Either way
// it's time to swap out our buffer.
let leaf_pairs = mem::take(&mut current_leaf_buffer);
let leaf = Self::pairs_to_leaf(leaf_pairs);
let hash = Self::hash_leaf(&leaf);
accumulator.nodes.insert(col, leaf);
accumulated_leaves.push(SubtreeLeaf { col, hash });
debug_assert!(current_leaf_buffer.is_empty());
}
// TODO: determine if there is any notable performance difference between computing
// subtree boundaries after the fact as an iterator adapter (like this), versus computing
// subtree boundaries as we go. Either way this function is only used at the beginning of a
// parallel construction, so it should not be a critical path.
accumulator.leaves = SubtreeLeavesIter::from_leaves(&mut accumulated_leaves).collect();
accumulator
}
/// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
///
/// `entries` need not be sorted. This function will sort them.
#[cfg(feature = "concurrent")]
fn build_subtrees(
mut entries: Vec<(Self::Key, Self::Value)>,
) -> (BTreeMap<NodeIndex, InnerNode>, BTreeMap<u64, Self::Leaf>) {
entries.sort_by_key(|item| {
let index = Self::key_to_leaf_index(&item.0);
index.value()
});
Self::build_subtrees_from_sorted_entries(entries)
}
/// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
///
/// This function is mostly an implementation detail of
/// [`SparseMerkleTree::with_entries_par()`].
#[cfg(feature = "concurrent")]
fn build_subtrees_from_sorted_entries(
entries: Vec<(Self::Key, Self::Value)>,
) -> (BTreeMap<NodeIndex, InnerNode>, BTreeMap<u64, Self::Leaf>) {
use rayon::prelude::*;
let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
let PairComputations {
leaves: mut leaf_subtrees,
nodes: initial_leaves,
} = Self::sorted_pairs_to_leaves(entries);
for current_depth in (SUBTREE_DEPTH..=DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
.into_par_iter()
.map(|subtree| {
debug_assert!(subtree.is_sorted());
debug_assert!(!subtree.is_empty());
let (nodes, subtree_root) = build_subtree(subtree, DEPTH, current_depth);
(nodes, subtree_root)
})
.unzip();
leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
accumulated_nodes.extend(nodes.into_iter().flatten());
debug_assert!(!leaf_subtrees.is_empty());
}
(accumulated_nodes, initial_leaves)
}
}
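For concreteness, this is the bottom-depth schedule the loop above walks for a depth-64 tree:

// DEPTH = 64, SUBTREE_DEPTH = 8: each pass hashes 8 levels in parallel and its
// subtree roots become the next pass's leaves.
let depths: Vec<u8> = (8u8..=64).step_by(8).rev().collect();
assert_eq!(depths, vec![64, 56, 48, 40, 32, 24, 16, 8]);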
// INNER NODE
// ================================================================================================
/// This struct is public so functions returning it can be used in `benches/`, but is otherwise not
/// part of the public API.
#[doc(hidden)]
#[derive(Debug, Default, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct InnerNode {
pub struct InnerNode {
pub left: RpoDigest,
pub right: RpoDigest,
}
@@ -403,7 +549,7 @@ impl<const DEPTH: u8> TryFrom<NodeIndex> for LeafIndex<DEPTH> {
fn try_from(node_index: NodeIndex) -> Result<Self, Self::Error> {
if node_index.depth() != DEPTH {
return Err(MerkleError::InvalidDepth {
return Err(MerkleError::InvalidNodeIndexDepth {
expected: DEPTH,
provided: node_index.depth(),
});
@@ -459,3 +605,197 @@ impl<const DEPTH: u8, K, V> MutationSet<DEPTH, K, V> {
self.new_root
}
}
// SUBTREES
// ================================================================================================
/// A subtree is of depth 8.
const SUBTREE_DEPTH: u8 = 8;
/// A depth-8 subtree contains 256 "columns" that can possibly be occupied.
const COLS_PER_SUBTREE: u64 = u64::pow(2, SUBTREE_DEPTH as u32);
/// Helper struct for organizing the data we care about when computing Merkle subtrees.
///
/// Note that these represent "conceptual" leaves of some subtree, not necessarily
/// the leaf type for the sparse Merkle tree.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct SubtreeLeaf {
/// The 'value' field of [`NodeIndex`]. When computing a subtree, the depth is already known.
pub col: u64,
/// The hash of the node this `SubtreeLeaf` represents.
pub hash: RpoDigest,
}
/// Helper struct to organize the return value of [`SparseMerkleTree::sorted_pairs_to_leaves()`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct PairComputations<K, L> {
/// Literal leaves to be added to the sparse Merkle tree's internal mapping.
pub nodes: BTreeMap<K, L>,
/// "Conceptual" leaves that will be used for computations.
pub leaves: Vec<Vec<SubtreeLeaf>>,
}
// Derive requires `L` to impl Default, even though we don't actually need that.
impl<K, L> Default for PairComputations<K, L> {
fn default() -> Self {
Self {
nodes: Default::default(),
leaves: Default::default(),
}
}
}
#[derive(Debug)]
struct SubtreeLeavesIter<'s> {
leaves: core::iter::Peekable<alloc::vec::Drain<'s, SubtreeLeaf>>,
}
impl<'s> SubtreeLeavesIter<'s> {
fn from_leaves(leaves: &'s mut Vec<SubtreeLeaf>) -> Self {
// TODO: determine if there is any notable performance difference between taking a Vec,
// which may need flattening first, vs storing a `Box<dyn Iterator<Item = SubtreeLeaf>>`.
// The latter may have self-referential properties that are impossible to express in purely
// safe Rust.
Self { leaves: leaves.drain(..).peekable() }
}
}
impl core::iter::Iterator for SubtreeLeavesIter<'_> {
type Item = Vec<SubtreeLeaf>;
/// Each `next()` collects an entire subtree.
fn next(&mut self) -> Option<Vec<SubtreeLeaf>> {
let mut subtree: Vec<SubtreeLeaf> = Default::default();
let mut last_subtree_col = 0;
while let Some(leaf) = self.leaves.peek() {
last_subtree_col = u64::max(1, last_subtree_col);
let is_exact_multiple = Integer::is_multiple_of(&last_subtree_col, &COLS_PER_SUBTREE);
let next_subtree_col = if is_exact_multiple {
u64::next_multiple_of(last_subtree_col + 1, COLS_PER_SUBTREE)
} else {
last_subtree_col.next_multiple_of(COLS_PER_SUBTREE)
};
last_subtree_col = leaf.col;
if leaf.col < next_subtree_col {
subtree.push(self.leaves.next().unwrap());
} else if subtree.is_empty() {
continue;
} else {
break;
}
}
if subtree.is_empty() {
debug_assert!(self.leaves.peek().is_none());
return None;
}
Some(subtree)
}
}
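A sketch of the grouping behavior: columns falling into different 256-column windows end up in different subtrees.

let mut leaves = vec![
    SubtreeLeaf { col: 0, hash: RpoDigest::default() },
    SubtreeLeaf { col: 1, hash: RpoDigest::default() },
    SubtreeLeaf { col: 300, hash: RpoDigest::default() },
];
let groups: Vec<Vec<SubtreeLeaf>> = SubtreeLeavesIter::from_leaves(&mut leaves).collect();
// Columns 0 and 1 share the first depth-8 subtree; column 300 starts the second.
assert_eq!(groups.len(), 2);
assert_eq!(groups[0].len(), 2);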
// HELPER FUNCTIONS
// ================================================================================================
/// Builds Merkle nodes from a bottom layer of "leaves" -- represented by a horizontal index and
/// the hash of the leaf at that index. `leaves` *must* be sorted by horizontal index, and
/// `leaves` must not contain more than one depth-8 subtree's worth of leaves.
///
/// This function will then calculate the inner nodes above each leaf for 8 layers, as well as
/// the "leaves" for the next 8-deep subtree, so this function can effectively be chained into
/// itself.
///
/// # Panics
/// With debug assertions on, this function panics under invalid inputs: if `leaves` contains
/// more entries than can fit in a depth-8 subtree, if `leaves` contains leaves belonging to
/// different depth-8 subtrees, if `bottom_depth` is lower in the tree than the specified
/// maximum depth (`DEPTH`), or if `leaves` is not sorted.
fn build_subtree(
mut leaves: Vec<SubtreeLeaf>,
tree_depth: u8,
bottom_depth: u8,
) -> (BTreeMap<NodeIndex, InnerNode>, SubtreeLeaf) {
debug_assert!(bottom_depth <= tree_depth);
debug_assert!(Integer::is_multiple_of(&bottom_depth, &SUBTREE_DEPTH));
debug_assert!(leaves.len() <= usize::pow(2, SUBTREE_DEPTH as u32));
let subtree_root = bottom_depth - SUBTREE_DEPTH;
let mut inner_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
let mut next_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(leaves.len() / 2);
for next_depth in (subtree_root..bottom_depth).rev() {
debug_assert!(next_depth <= bottom_depth);
// `next_depth` is the stuff we're making.
// `current_depth` is the stuff we have.
let current_depth = next_depth + 1;
let mut iter = leaves.drain(..).peekable();
while let Some(first) = iter.next() {
// `first` may be either a left or a right node. If it is a right node (odd
// column), its left sibling is not present in `leaves`, so the sibling must be
// the empty subtree hash. If it is a left node (even column), we peek at the
// next leaf: when it is the adjacent right sibling we consume it as well,
// otherwise the right sibling is empty.
let is_right = first.col.is_odd();
let (left, right) = if is_right {
// Discontinuous iteration: we have no left node, so it must be empty.
let left = SubtreeLeaf {
col: first.col - 1,
hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
};
let right = first;
(left, right)
} else {
let left = first;
let right_col = first.col + 1;
let right = match iter.peek().copied() {
Some(SubtreeLeaf { col, .. }) if col == right_col => {
// Our inputs must be sorted.
debug_assert!(left.col <= col);
// The next leaf in the iterator is our sibling. Use it and consume it!
iter.next().unwrap()
},
// Otherwise, the leaves don't contain our sibling, so our sibling must be
// empty.
_ => SubtreeLeaf {
col: right_col,
hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
},
};
(left, right)
};
let index = NodeIndex::new_unchecked(current_depth, left.col).parent();
let node = InnerNode { left: left.hash, right: right.hash };
let hash = node.hash();
let &equivalent_empty_hash = EmptySubtreeRoots::entry(tree_depth, next_depth);
// If this hash is empty, then it doesn't become a new inner node, nor does it count
// as a leaf for the next depth.
if hash != equivalent_empty_hash {
inner_nodes.insert(index, node);
next_leaves.push(SubtreeLeaf { col: index.value(), hash });
}
}
// Stop borrowing `leaves`, so we can swap it.
// The iterator is empty at this point anyway.
drop(iter);
// After each depth, consider the stuff we just made the new "leaves", and empty the
// other collection.
mem::swap(&mut leaves, &mut next_leaves);
}
debug_assert_eq!(leaves.len(), 1);
let root = leaves.pop().unwrap();
(inner_nodes, root)
}
#[cfg(feature = "internal")]
pub fn build_subtree_for_bench(
leaves: Vec<SubtreeLeaf>,
tree_depth: u8,
bottom_depth: u8,
) -> (BTreeMap<NodeIndex, InnerNode>, SubtreeLeaf) {
build_subtree(leaves, tree_depth, bottom_depth)
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests;

View File

@@ -1,4 +1,7 @@
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use super::{
super::ValuePath, EmptySubtreeRoots, InnerNode, InnerNodeInfo, LeafIndex, MerkleError,
@@ -81,7 +84,7 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
for (idx, (key, value)) in entries.into_iter().enumerate() {
if idx >= max_num_entries {
return Err(MerkleError::InvalidNumEntries(max_num_entries));
return Err(MerkleError::TooManyEntries(max_num_entries));
}
let old_value = tree.insert(LeafIndex::<DEPTH>::new(key)?, value);
@@ -97,6 +100,23 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
Ok(tree)
}
/// Returns a new [`SimpleSmt`] instantiated from already computed leaves and nodes.
///
/// This function performs minimal consistency checking. It is the caller's responsibility to
/// ensure the passed arguments are correct and consistent with each other.
///
/// # Panics
/// With debug assertions on, this function panics if `root` does not match the root node in
/// `inner_nodes`.
pub fn from_raw_parts(
inner_nodes: BTreeMap<NodeIndex, InnerNode>,
leaves: BTreeMap<u64, Word>,
root: RpoDigest,
) -> Self {
// Our particular implementation of `from_raw_parts()` never returns `Err`.
<Self as SparseMerkleTree<DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
}
/// Wrapper around [`SimpleSmt::with_leaves`] which inserts leaves at contiguous indices
/// starting at index 0.
pub fn with_contiguous_leaves(
@@ -246,7 +266,7 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
subtree: SimpleSmt<SUBTREE_DEPTH>,
) -> Result<RpoDigest, MerkleError> {
if SUBTREE_DEPTH > DEPTH {
return Err(MerkleError::InvalidSubtreeDepth {
return Err(MerkleError::SubtreeDepthExceedsDepth {
subtree_depth: SUBTREE_DEPTH,
tree_depth: DEPTH,
});
@@ -306,6 +326,19 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
const EMPTY_VALUE: Self::Value = EMPTY_WORD;
const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(DEPTH, 0);
fn from_raw_parts(
inner_nodes: BTreeMap<NodeIndex, InnerNode>,
leaves: BTreeMap<u64, Word>,
root: RpoDigest,
) -> Result<Self, MerkleError> {
if cfg!(debug_assertions) {
let root_node = inner_nodes.get(&NodeIndex::root()).unwrap();
assert_eq!(root_node.hash(), root);
}
Ok(Self { root, inner_nodes, leaves })
}
fn root(&self) -> RpoDigest {
self.root
}
@@ -370,4 +403,11 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
fn path_and_leaf_to_opening(path: MerklePath, leaf: Word) -> ValuePath {
(path, leaf).into()
}
fn pairs_to_leaf(mut pairs: Vec<(LeafIndex<DEPTH>, Word)>) -> Word {
// SimpleSmt can't have more than one value per key.
assert_eq!(pairs.len(), 1);
let (_key, value) = pairs.pop().unwrap();
value
}
}

View File

@@ -1,5 +1,7 @@
use alloc::vec::Vec;
use assert_matches::assert_matches;
use super::{
super::{MerkleError, RpoDigest, SimpleSmt},
NodeIndex,
@@ -257,12 +259,12 @@ fn test_simplesmt_fail_on_duplicates() {
// consecutive
let entries = [(1, *first), (1, *second)];
let smt = SimpleSmt::<64>::with_leaves(entries);
assert_eq!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
assert_matches!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
// not consecutive
let entries = [(1, *first), (5, int_to_leaf(5)), (1, *second)];
let smt = SimpleSmt::<64>::with_leaves(entries);
assert_eq!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
assert_matches!(smt.unwrap_err(), MerkleError::DuplicateValuesForIndex(1));
}
}

src/merkle/smt/tests.rs (new file, 417 lines)
View File

@@ -0,0 +1,417 @@
use alloc::{collections::BTreeMap, vec::Vec};
use super::{
build_subtree, InnerNode, LeafIndex, NodeIndex, PairComputations, SmtLeaf, SparseMerkleTree,
SubtreeLeaf, SubtreeLeavesIter, COLS_PER_SUBTREE, SUBTREE_DEPTH,
};
use crate::{
hash::rpo::RpoDigest,
merkle::{Smt, SMT_DEPTH},
Felt, Word, ONE,
};
fn smtleaf_to_subtree_leaf(leaf: &SmtLeaf) -> SubtreeLeaf {
SubtreeLeaf {
col: leaf.index().index.value(),
hash: leaf.hash(),
}
}
#[test]
fn test_sorted_pairs_to_leaves() {
let entries: Vec<(RpoDigest, Word)> = vec![
// Subtree 0.
(RpoDigest::new([ONE, ONE, ONE, Felt::new(16)]), [ONE; 4]),
(RpoDigest::new([ONE, ONE, ONE, Felt::new(17)]), [ONE; 4]),
// Leaf index collision.
(RpoDigest::new([ONE, ONE, Felt::new(10), Felt::new(20)]), [ONE; 4]),
(RpoDigest::new([ONE, ONE, Felt::new(20), Felt::new(20)]), [ONE; 4]),
// Subtree 1. Normal single leaf again.
(RpoDigest::new([ONE, ONE, ONE, Felt::new(400)]), [ONE; 4]), // Subtree boundary.
(RpoDigest::new([ONE, ONE, ONE, Felt::new(401)]), [ONE; 4]),
// Subtree 2. Another normal leaf.
(RpoDigest::new([ONE, ONE, ONE, Felt::new(1024)]), [ONE; 4]),
];
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
let control_leaves: Vec<SmtLeaf> = {
let mut entries_iter = entries.iter().cloned();
let mut next_entry = || entries_iter.next().unwrap();
let control_leaves = vec![
// Subtree 0.
SmtLeaf::Single(next_entry()),
SmtLeaf::Single(next_entry()),
SmtLeaf::new_multiple(vec![next_entry(), next_entry()]).unwrap(),
// Subtree 1.
SmtLeaf::Single(next_entry()),
SmtLeaf::Single(next_entry()),
// Subtree 2.
SmtLeaf::Single(next_entry()),
];
assert_eq!(entries_iter.next(), None);
control_leaves
};
let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = {
let mut control_leaves_iter = control_leaves.iter();
let mut next_leaf = || control_leaves_iter.next().unwrap();
let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = [
// Subtree 0.
vec![next_leaf(), next_leaf(), next_leaf()],
// Subtree 1.
vec![next_leaf(), next_leaf()],
// Subtree 2.
vec![next_leaf()],
]
.map(|subtree| subtree.into_iter().map(smtleaf_to_subtree_leaf).collect())
.to_vec();
assert_eq!(control_leaves_iter.next(), None);
control_subtree_leaves
};
let subtrees: PairComputations<u64, SmtLeaf> = Smt::sorted_pairs_to_leaves(entries);
// This will check that the hashes, columns, and subtree assignments all match.
assert_eq!(subtrees.leaves, control_subtree_leaves);
// Flattening and re-separating out the leaves into subtrees should have the same result.
let mut all_leaves: Vec<SubtreeLeaf> = subtrees.leaves.clone().into_iter().flatten().collect();
let re_grouped: Vec<Vec<_>> = SubtreeLeavesIter::from_leaves(&mut all_leaves).collect();
assert_eq!(subtrees.leaves, re_grouped);
// Then finally we might as well check the computed leaf nodes too.
let control_leaves: BTreeMap<u64, SmtLeaf> = control
.leaves()
.map(|(index, value)| (index.index.value(), value.clone()))
.collect();
for (column, test_leaf) in subtrees.nodes {
if test_leaf.is_empty() {
continue;
}
let control_leaf = control_leaves
.get(&column)
.unwrap_or_else(|| panic!("no leaf node found for column {column}"));
assert_eq!(control_leaf, &test_leaf);
}
}
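
As the "Leaf index collision" entries above suggest, a key's column appears to be derived from its last field element, so two keys sharing that element merge into one multi-entry leaf. A toy illustration with u64 stand-ins for field elements:

// Toy column derivation: the last limb of the key selects the leaf column.
fn leaf_column(key: [u64; 4]) -> u64 {
    key[3]
}

fn collision_example() {
    let a = [1, 1, 10, 20];
    let b = [1, 1, 20, 20];
    // Distinct keys, same column: these collide into one leaf.
    assert_eq!(leaf_column(a), leaf_column(b));
}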
// Helper for the below tests.
fn generate_entries(pair_count: u64) -> Vec<(RpoDigest, Word)> {
(0..pair_count)
.map(|i| {
let leaf_index = ((i as f64 / pair_count as f64) * (pair_count as f64)) as u64;
let key = RpoDigest::new([ONE, ONE, Felt::new(i), Felt::new(leaf_index)]);
let value = [ONE, ONE, ONE, Felt::new(i)];
(key, value)
})
.collect()
}
#[test]
fn test_single_subtree() {
// A single subtree's worth of leaves.
const PAIR_COUNT: u64 = COLS_PER_SUBTREE;
let entries = generate_entries(PAIR_COUNT);
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
// `entries` should already be sorted by nature of how we constructed it.
let leaves = Smt::sorted_pairs_to_leaves(entries).leaves;
let leaves = leaves.into_iter().next().unwrap();
let (first_subtree, subtree_root) = build_subtree(leaves, SMT_DEPTH, SMT_DEPTH);
assert!(!first_subtree.is_empty());
// The inner nodes computed from that subtree should match the nodes in our control tree.
for (index, node) in first_subtree.into_iter() {
let control = control.get_inner_node(index);
assert_eq!(
control, node,
"subtree-computed node at index {index:?} does not match control",
);
}
// The root returned should also match the equivalent node in the control tree.
let control_root_index =
NodeIndex::new(SMT_DEPTH - SUBTREE_DEPTH, subtree_root.col).expect("Valid root index");
let control_root_node = control.get_inner_node(control_root_index);
let control_hash = control_root_node.hash();
assert_eq!(
control_hash, subtree_root.hash,
"Subtree-computed root at index {control_root_index:?} does not match control"
);
}
// Test not just that we can compute a subtree correctly, but that we can feed the results of
// one subtree into computing another. In other words, test that `build_subtree()` is correctly
// composable.
#[test]
fn test_two_subtrees() {
// Two subtrees' worth of leaves.
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 2;
let entries = generate_entries(PAIR_COUNT);
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
let PairComputations { leaves, .. } = Smt::sorted_pairs_to_leaves(entries);
// With two subtrees' worth of leaves, we should have exactly two subtrees.
let [first, second]: [Vec<_>; 2] = leaves.try_into().unwrap();
assert_eq!(first.len() as u64, PAIR_COUNT / 2);
assert_eq!(first.len(), second.len());
let mut current_depth = SMT_DEPTH;
let mut next_leaves: Vec<SubtreeLeaf> = Default::default();
let (first_nodes, first_root) = build_subtree(first, SMT_DEPTH, current_depth);
next_leaves.push(first_root);
let (second_nodes, second_root) = build_subtree(second, SMT_DEPTH, current_depth);
next_leaves.push(second_root);
// All new inner nodes plus the new subtree leaves should total PAIR_COUNT (512) for one depth-cycle.
let total_computed = first_nodes.len() + second_nodes.len() + next_leaves.len();
assert_eq!(total_computed as u64, PAIR_COUNT);
// Verify the computed nodes of both subtrees.
let computed_nodes = first_nodes.clone().into_iter().chain(second_nodes);
for (index, test_node) in computed_nodes {
let control_node = control.get_inner_node(index);
assert_eq!(
control_node, test_node,
"subtree-computed node at index {index:?} does not match control",
);
}
current_depth -= SUBTREE_DEPTH;
let (nodes, root_leaf) = build_subtree(next_leaves, SMT_DEPTH, current_depth);
assert_eq!(nodes.len(), SUBTREE_DEPTH as usize);
assert_eq!(root_leaf.col, 0);
for (index, test_node) in nodes {
let control_node = control.get_inner_node(index);
assert_eq!(
control_node, test_node,
"subtree-computed node at index {index:?} does not match control",
);
}
let index = NodeIndex::new(current_depth - SUBTREE_DEPTH, root_leaf.col).unwrap();
let control_root = control.get_inner_node(index).hash();
assert_eq!(control_root, root_leaf.hash, "Root mismatch");
}
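
The composition being tested here (roots of one pass become leaves of the next) shrinks the working set by a factor of 2^SUBTREE_DEPTH per pass. A standalone sketch of one level of that bottom-up reduction over a dense level, with a toy hash rather than RPO:

fn toy_hash(left: u64, right: u64) -> u64 {
    left.wrapping_mul(31).wrapping_add(right) ^ 0x9e37_79b9
}

// One level of bottom-up reduction over a dense level of child hashes;
// applying this SUBTREE_DEPTH times is one `build_subtree()`-style pass,
// and its outputs feed the next pass as leaves.
fn parent_level(children: &[u64]) -> Vec<u64> {
    children
        .chunks(2)
        .map(|pair| toy_hash(pair[0], *pair.get(1).unwrap_or(&0)))
        .collect()
}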
#[test]
fn test_singlethreaded_subtrees() {
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
let entries = generate_entries(PAIR_COUNT);
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
let PairComputations {
leaves: mut leaf_subtrees,
nodes: test_leaves,
} = Smt::sorted_pairs_to_leaves(entries);
for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
// There's no flat_map_unzip(), so this is the best we can do.
let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
.into_iter()
.enumerate()
.map(|(i, subtree)| {
// Pre-assertions.
assert!(
subtree.is_sorted(),
"subtree {i} at bottom-depth {current_depth} is not sorted",
);
assert!(
!subtree.is_empty(),
"subtree {i} at bottom-depth {current_depth} is empty!",
);
// Do actual things.
let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
// Post-assertions.
for (&index, test_node) in nodes.iter() {
let control_node = control.get_inner_node(index);
assert_eq!(
test_node, &control_node,
"depth {} subtree {}: test node does not match control at index {:?}",
current_depth, i, index,
);
}
(nodes, subtree_root)
})
.unzip();
// Update state between each depth iteration.
leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
accumulated_nodes.extend(nodes.into_iter().flatten());
assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
}
// Make sure the true leaves match, first checking length and then checking each individual
// leaf.
let control_leaves: BTreeMap<_, _> = control.leaves().collect();
let control_leaves_len = control_leaves.len();
let test_leaves_len = test_leaves.len();
assert_eq!(test_leaves_len, control_leaves_len);
for (col, ref test_leaf) in test_leaves {
let index = LeafIndex::new_max_depth(col);
let &control_leaf = control_leaves.get(&index).unwrap();
assert_eq!(test_leaf, control_leaf, "test leaf at column {col} does not match control");
}
// Make sure the inner nodes match, checking length first and then each individual node.
let control_nodes_len = control.inner_nodes().count();
let test_nodes_len = accumulated_nodes.len();
assert_eq!(test_nodes_len, control_nodes_len);
for (index, test_node) in accumulated_nodes.clone() {
let control_node = control.get_inner_node(index);
assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
}
// After the last iteration of the above for loop, we should have the new root node actually
// in two places: one in `accumulated_nodes`, and the other as the "next leaves" return from
// `build_subtree()`. So let's check both!
let control_root = control.get_inner_node(NodeIndex::root());
// That for loop should have left us with only one leaf subtree...
let [leaf_subtree]: [Vec<_>; 1] = leaf_subtrees.try_into().unwrap();
// which itself contains only one 'leaf'...
let [root_leaf]: [SubtreeLeaf; 1] = leaf_subtree.try_into().unwrap();
// which matches the expected root.
assert_eq!(control.root(), root_leaf.hash);
// Likewise `accumulated_nodes` should contain a node at the root index...
assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
// and it should match our actual root.
let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
assert_eq!(control_root, *test_root);
// And of course the root we got from each place should match.
assert_eq!(control.root(), root_leaf.hash);
}
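
The per-depth state update above relies on `SubtreeLeavesIter::from_leaves()` to re-chunk the flat list of subtree roots into groups for the next pass. A toy version of that re-grouping, assuming (as the collision test above suggests) that a leaf's parent subtree is `column / COLS_PER_SUBTREE` over sorted input:

// Bucket sorted (column, hash) leaves by which parent subtree they fall into.
fn regroup(leaves: &[(u64, u64)], cols_per_subtree: u64) -> Vec<Vec<(u64, u64)>> {
    let mut groups: Vec<Vec<(u64, u64)>> = Vec::new();
    let mut current_subtree: Option<u64> = None;
    for &(col, hash) in leaves {
        let subtree = col / cols_per_subtree;
        if current_subtree != Some(subtree) {
            // Start a new group whenever the parent subtree changes.
            groups.push(Vec::new());
            current_subtree = Some(subtree);
        }
        groups.last_mut().unwrap().push((col, hash));
    }
    groups
}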
/// The parallel version of `test_singlethreaded_subtrees()`.
#[test]
#[cfg(feature = "concurrent")]
fn test_multithreaded_subtrees() {
use rayon::prelude::*;
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
let entries = generate_entries(PAIR_COUNT);
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
let PairComputations {
leaves: mut leaf_subtrees,
nodes: test_leaves,
} = Smt::sorted_pairs_to_leaves(entries);
for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
let (nodes, mut subtree_roots): (Vec<BTreeMap<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
.into_par_iter()
.enumerate()
.map(|(i, subtree)| {
// Pre-assertions.
assert!(
subtree.is_sorted(),
"subtree {i} at bottom-depth {current_depth} is not sorted",
);
assert!(
!subtree.is_empty(),
"subtree {i} at bottom-depth {current_depth} is empty!",
);
let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
// Post-assertions.
for (&index, test_node) in nodes.iter() {
let control_node = control.get_inner_node(index);
assert_eq!(
test_node, &control_node,
"depth {} subtree {}: test node does not match control at index {:?}",
current_depth, i, index,
);
}
(nodes, subtree_root)
})
.unzip();
leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
accumulated_nodes.extend(nodes.into_iter().flatten());
assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
}
// Make sure the true leaves match, checking length first and then each individual leaf.
let control_leaves: BTreeMap<_, _> = control.leaves().collect();
let control_leaves_len = control_leaves.len();
let test_leaves_len = test_leaves.len();
assert_eq!(test_leaves_len, control_leaves_len);
for (col, ref test_leaf) in test_leaves {
let index = LeafIndex::new_max_depth(col);
let &control_leaf = control_leaves.get(&index).unwrap();
assert_eq!(test_leaf, control_leaf);
}
// Make sure the inner nodes match, checking length first and then each individual node.
let control_nodes_len = control.inner_nodes().count();
let test_nodes_len = accumulated_nodes.len();
assert_eq!(test_nodes_len, control_nodes_len);
for (index, test_node) in accumulated_nodes.clone() {
let control_node = control.get_inner_node(index);
assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
}
// After the last iteration of the above for loop, we should have the new root node actually
// in two places: one in `accumulated_nodes`, and the other as the "next leaves" return from
// `build_subtree()`. So let's check both!
let control_root = control.get_inner_node(NodeIndex::root());
// That for loop should have left us with only one leaf subtree...
let [leaf_subtree]: [_; 1] = leaf_subtrees.try_into().unwrap();
// which itself contains only one 'leaf'...
let [root_leaf]: [_; 1] = leaf_subtree.try_into().unwrap();
// which matches the expected root.
assert_eq!(control.root(), root_leaf.hash);
// Likewise `accumulated_nodes` should contain a node at the root index...
assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
// and it should match our actual root.
let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
assert_eq!(control_root, *test_root);
// And of course the root we got from each place should match.
assert_eq!(control.root(), root_leaf.hash);
}
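
The only change from the single-threaded version is `into_iter()` becoming rayon's `into_par_iter()`; `map(...).unzip()` works identically on parallel iterators. A minimal demonstration of that pattern:

use rayon::prelude::*;

fn parallel_unzip_demo() {
    // Fan out work across a thread pool, collecting two outputs per item,
    // just as the subtree loop collects (nodes, subtree_root) pairs.
    let (nodes, roots): (Vec<u64>, Vec<u64>) =
        (0u64..64).into_par_iter().map(|i| (i * 2, i * 2 + 1)).unzip();
    assert_eq!(nodes.len(), roots.len());
}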
#[test]
#[cfg(feature = "concurrent")]
fn test_with_entries_parallel() {
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
let entries = generate_entries(PAIR_COUNT);
let control = Smt::with_entries_sequential(entries.clone()).unwrap();
let smt = Smt::with_entries(entries.clone()).unwrap();
assert_eq!(smt.root(), control.root());
assert_eq!(smt, control);
}
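
Both parallel tests above are gated on the `concurrent` feature, so downstream users presumably opt in the same way, e.g. by enabling `features = ["concurrent"]` on the dependency in Cargo.toml; without the feature, `Smt::with_entries()` would take the sequential path that `with_entries_sequential()` exposes directly.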


@@ -136,7 +136,10 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
for i in (0..index.depth()).rev() {
let node = self.nodes.get(&hash).ok_or(MerkleError::NodeNotInStore(hash, index))?;
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
let bit = (index.value() >> i) & 1;
hash = if bit == 0 { node.left } else { node.right }
@@ -162,7 +165,10 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
for i in (0..index.depth()).rev() {
let node = self.nodes.get(&hash).ok_or(MerkleError::NodeNotInStore(hash, index))?;
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
let bit = (index.value() >> i) & 1;
hash = if bit == 0 {
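
Both hunks above walk from the root toward `index`, selecting a child by one bit of the index per level. A self-contained sketch of that traversal, using toy u64 hashes and a plain map rather than the crate's types:

use std::collections::HashMap;

// Walk from `root` down `depth` levels, picking left/right children by the
// bits of `index` from most to least significant, as in get_node()/get_path().
fn walk(root: u64, index: u64, depth: u8, children: &HashMap<u64, (u64, u64)>) -> Option<u64> {
    let mut hash = root;
    for i in (0..depth).rev() {
        let (left, right) = *children.get(&hash)?;
        let bit = (index >> i) & 1;
        hash = if bit == 0 { left } else { right };
    }
    Some(hash)
}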


@@ -1,3 +1,4 @@
use assert_matches::assert_matches;
use seq_macro::seq;
#[cfg(feature = "std")]
use {
@@ -42,14 +43,14 @@ const VALUES8: [RpoDigest; 8] = [
fn test_root_not_in_store() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(digests_to_words(&VALUES4))?;
let store = MerkleStore::from(&mtree);
assert_eq!(
assert_matches!(
store.get_node(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
Err(MerkleError::RootNotInStore(VALUES4[0])),
Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
"Leaf 0 is not a root"
);
assert_eq!(
assert_matches!(
store.get_path(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
Err(MerkleError::RootNotInStore(VALUES4[0])),
Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
"Leaf 0 is not a root"
);
@@ -64,46 +65,46 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
// STORE LEAVES ARE CORRECT -------------------------------------------------------------------
// checks that the leaves in the store correspond to the expected values
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)),
Ok(VALUES4[0]),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
VALUES4[0],
"node 0 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)),
Ok(VALUES4[1]),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
VALUES4[1],
"node 1 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)),
Ok(VALUES4[2]),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
VALUES4[2],
"node 2 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)),
Ok(VALUES4[3]),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
VALUES4[3],
"node 3 must be in the tree"
);
// STORE LEAVES MATCH TREE --------------------------------------------------------------------
// sanity check the values returned by the store and the tree
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 0)),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)),
mtree.get_node(NodeIndex::make(mtree.depth(), 0)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
"node 0 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 1)),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)),
mtree.get_node(NodeIndex::make(mtree.depth(), 1)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
"node 1 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 2)),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)),
mtree.get_node(NodeIndex::make(mtree.depth(), 2)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
"node 2 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 3)),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)),
mtree.get_node(NodeIndex::make(mtree.depth(), 3)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
"node 3 must be the same for both MerkleTree and MerkleStore"
);
@@ -115,8 +116,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 0)),
Ok(result.path),
mtree.get_path(NodeIndex::make(mtree.depth(), 0)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -126,8 +127,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 1)),
Ok(result.path),
mtree.get_path(NodeIndex::make(mtree.depth(), 1)).unwrap(),
result.path,
"merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
);
@@ -137,8 +138,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 2)),
Ok(result.path),
mtree.get_path(NodeIndex::make(mtree.depth(), 2)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -148,8 +149,8 @@ fn test_merkle_tree() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 3)),
Ok(result.path),
mtree.get_path(NodeIndex::make(mtree.depth(), 3)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -240,56 +241,56 @@ fn test_sparse_merkle_tree() -> Result<(), MerkleError> {
// STORE LEAVES ARE CORRECT ==============================================================
// checks that the leaves in the store correspond to the expected values
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)),
Ok(VALUES4[0]),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
VALUES4[0],
"node 0 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)),
Ok(VALUES4[1]),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
VALUES4[1],
"node 1 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)),
Ok(VALUES4[2]),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
VALUES4[2],
"node 2 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)),
Ok(VALUES4[3]),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
VALUES4[3],
"node 3 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)),
Ok(RpoDigest::default()),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
RpoDigest::default(),
"unmodified node 4 must be ZERO"
);
// STORE LEAVES MATCH TREE ===============================================================
// sanity check the values returned by the store and the tree
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 0)),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)),
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
"node 0 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 1)),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)),
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
"node 1 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 2)),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)),
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
"node 2 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 3)),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)),
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
"node 3 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 4)),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)),
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
"node 4 must be the same for both SparseMerkleTree and MerkleStore"
);
@@ -385,46 +386,46 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
// STORE LEAVES ARE CORRECT ==============================================================
// checks that the leaves in the store correspond to the expected values
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)),
Ok(VALUES4[0]),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
VALUES4[0],
"node 0 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)),
Ok(VALUES4[1]),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
VALUES4[1],
"node 1 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)),
Ok(VALUES4[2]),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
VALUES4[2],
"node 2 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)),
Ok(VALUES4[3]),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
VALUES4[3],
"node 3 must be in the pmt"
);
// STORE LEAVES MATCH PMT ================================================================
// sanity check the values returned by the store and the pmt
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 0)),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)),
pmt.get_node(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
"node 0 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 1)),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)),
pmt.get_node(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
"node 1 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 2)),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)),
pmt.get_node(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
"node 2 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 3)),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)),
pmt.get_node(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
"node 3 must be the same for both PartialMerkleTree and MerkleStore"
);
@@ -436,8 +437,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 0)),
Ok(result.path),
pmt.get_path(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -447,8 +448,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 1)),
Ok(result.path),
pmt.get_path(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
result.path,
"merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
);
@@ -458,8 +459,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 2)),
Ok(result.path),
pmt.get_path(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -469,8 +470,8 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> {
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 3)),
Ok(result.path),
pmt.get_path(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
@@ -498,7 +499,7 @@ fn wont_open_to_different_depth_root() {
let store = MerkleStore::from(&mtree);
let index = NodeIndex::root();
let err = store.get_node(root, index).err().unwrap();
assert_eq!(err, MerkleError::RootNotInStore(root));
assert_matches!(err, MerkleError::RootNotInStore(err_root) if err_root == root);
}
#[test]
@@ -537,7 +538,7 @@ fn test_set_node() -> Result<(), MerkleError> {
let value = int_to_node(42);
let index = NodeIndex::make(mtree.depth(), 0);
let new_root = store.set_node(mtree.root(), index, value)?.root;
assert_eq!(store.get_node(new_root, index), Ok(value), "Value must have changed");
assert_eq!(store.get_node(new_root, index).unwrap(), value, "value must have changed");
Ok(())
}
@@ -745,7 +746,7 @@ fn get_leaf_depth_works_with_depth_8() {
// duplicate the tree on `a` and assert the depth computation is short-circuited by that subtree
let index = NodeIndex::new(8, a).unwrap();
root = store.set_node(root, index, root).unwrap().root;
assert_eq!(Err(MerkleError::DepthTooBig(9)), store.get_leaf_depth(root, 8, a));
assert_matches!(store.get_leaf_depth(root, 8, a).unwrap_err(), MerkleError::DepthTooBig(9));
}
#[test]


@@ -145,8 +145,10 @@ impl RandomCoin for RpoRandomCoin {
self.state[RATE_START] += nonce;
Rpo256::apply_permutation(&mut self.state);
// reset the buffer
self.current = RATE_START;
// reset the buffer and move the next random element pointer to the second rate element.
// this is done as the first rate element will be "biased" via the provided `nonce` to
// contain some number of leading zeros.
self.current = RATE_START + 1;
// determine how many bits are needed to represent valid values in the domain
let v_mask = (domain_size - 1) as u64;
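
The mask above assumes `domain_size` is a power of two, so `value & (domain_size - 1)` keeps exactly the low bits needed to cover the domain. A small sketch of the idea:

// Sketch: for a power-of-two domain, masking the low bits of a uniform
// source yields a uniform value in [0, domain_size).
fn draw_in_domain(source: u64, domain_size: u64) -> u64 {
    debug_assert!(domain_size.is_power_of_two());
    source & (domain_size - 1)
}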
@@ -172,6 +174,36 @@ impl RandomCoin for RpoRandomCoin {
Ok(values)
}
fn reseed_with_salt(
&mut self,
data: <Self::Hasher as winter_crypto::Hasher>::Digest,
salt: Option<<Self::Hasher as winter_crypto::Hasher>::Digest>,
) {
// Reset buffer
self.current = RATE_START;
// Add the new seed material to the first half of the rate portion of the RPO state
let data: Word = data.into();
self.state[RATE_START] += data[0];
self.state[RATE_START + 1] += data[1];
self.state[RATE_START + 2] += data[2];
self.state[RATE_START + 3] += data[3];
if let Some(salt) = salt {
// Add the salt to the second half of the rate portion of the RPO state
let data: Word = salt.into();
self.state[RATE_START + 4] += data[0];
self.state[RATE_START + 5] += data[1];
self.state[RATE_START + 6] += data[2];
self.state[RATE_START + 7] += data[3];
}
// Absorb
Rpo256::apply_permutation(&mut self.state);
}
}
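
The reseed logic is a standard sponge absorb: add the digest into the first half of the rate, the optional salt into the second half, then permute. A toy sponge illustrating just that structure, with a placeholder permutation that is not RPO:

// State layout mirrors the code above: capacity first, then an 8-element
// rate starting at RATE_START; `permute` is a stand-in, not a real hash.
struct ToySponge {
    state: [u64; 12],
}

impl ToySponge {
    const RATE_START: usize = 4;

    fn permute(&mut self) {
        // Placeholder mixing step only; a real permutation mixes across elements.
        for s in self.state.iter_mut() {
            *s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
        }
    }

    fn reseed_with_salt(&mut self, data: [u64; 4], salt: Option<[u64; 4]>) {
        // Absorb seed material into the first half of the rate.
        for (i, d) in data.iter().enumerate() {
            self.state[Self::RATE_START + i] = self.state[Self::RATE_START + i].wrapping_add(*d);
        }
        // Absorb the optional salt into the second half of the rate.
        if let Some(salt) = salt {
            for (i, s) in salt.iter().enumerate() {
                self.state[Self::RATE_START + 4 + i] =
                    self.state[Self::RATE_START + 4 + i].wrapping_add(*s);
            }
        }
        self.permute();
    }
}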
// FELT RNG IMPLEMENTATION


@@ -172,6 +172,36 @@ impl RandomCoin for RpxRandomCoin {
Ok(values)
}
fn reseed_with_salt(
&mut self,
data: <Self::Hasher as winter_crypto::Hasher>::Digest,
salt: Option<<Self::Hasher as winter_crypto::Hasher>::Digest>,
) {
// Reset buffer
self.current = RATE_START;
// Add the new seed material to the first half of the rate portion of the RPX state
let data: Word = data.into();
self.state[RATE_START] += data[0];
self.state[RATE_START + 1] += data[1];
self.state[RATE_START + 2] += data[2];
self.state[RATE_START + 3] += data[3];
if let Some(salt) = salt {
// Add the salt to the second half of the rate portion of the RPX state
let data: Word = salt.into();
self.state[RATE_START + 4] += data[0];
self.state[RATE_START + 5] += data[1];
self.state[RATE_START + 6] += data[2];
self.state[RATE_START + 7] += data[3];
}
// Absorb
Rpx256::apply_permutation(&mut self.state);
}
}
// FELT RNG IMPLEMENTATION


@@ -1,7 +1,9 @@
//! Utilities used in this crate which can also be generally useful downstream.
use alloc::string::String;
use core::fmt::{self, Display, Write};
use core::fmt::{self, Write};
use thiserror::Error;
use super::Word;
@@ -46,36 +48,20 @@ pub fn bytes_to_hex_string<const N: usize>(data: [u8; N]) -> String {
}
/// Defines errors which can occur during parsing of hexadecimal strings.
#[derive(Debug)]
#[derive(Debug, Error)]
pub enum HexParseError {
#[error(
"expected hex data to have length {expected}, including the 0x prefix, found {actual}"
)]
InvalidLength { expected: usize, actual: usize },
#[error("hex encoded data must start with 0x prefix")]
MissingPrefix,
#[error("hex encoded data must contain only characters [a-zA-Z0-9]")]
InvalidChar,
#[error("hex encoded values of a Digest must be inside the field modulus")]
OutOfRange,
}
impl Display for HexParseError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
HexParseError::InvalidLength { expected, actual } => {
write!(f, "Expected hex data to have length {expected}, including the 0x prefix. Got {actual}")
},
HexParseError::MissingPrefix => {
write!(f, "Hex encoded data must start with 0x prefix")
},
HexParseError::InvalidChar => {
write!(f, "Hex encoded data must contain characters [a-zA-Z0-9]")
},
HexParseError::OutOfRange => {
write!(f, "Hex encoded values of an RpoDigest must be inside the field modulus")
},
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for HexParseError {}
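
With `thiserror`, the hand-written `Display` impl and the `cfg`-gated `std::error::Error` impl above both collapse into the `#[error(...)]` attributes. A minimal sketch of the derive in isolation, with a hypothetical error type:

use thiserror::Error;

// The attribute generates the Display impl that was previously hand-written,
// and std::error::Error is implemented automatically.
#[derive(Debug, Error)]
enum DemoParseError {
    #[error("expected data of length {expected}, found {actual}")]
    InvalidLength { expected: usize, actual: usize },
    #[error("missing 0x prefix")]
    MissingPrefix,
}

fn demo() {
    let err = DemoParseError::InvalidLength { expected: 10, actual: 4 };
    assert_eq!(err.to_string(), "expected data of length 10, found 4");
}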
/// Parses a hex string into an array of bytes of known size.
pub fn hex_to_bytes<const N: usize>(value: &str) -> Result<[u8; N], HexParseError> {
let expected: usize = (N * 2) + 2;