diff --git a/CLAUDE.md b/CLAUDE.md index 4d92446c2..8f0ccd02c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -50,6 +50,11 @@ scripts/safe-cargo.sh +nightly fmt --all **Dash Evo Tool** is a cross-platform GUI application (Rust + egui) for interacting with Dash Evolution. It enables DPNS username registration, contest voting, state transition viewing, wallet management, and identity operations across Mainnet/Testnet/Devnet. +## Documentation + +- **docs/ai-design** should contain architecture and technical design files, grouped in subdirectories prefixed with ISO-formatted date +- end-user documentation is in a separate repo: https://github.com/dashpay/docs/tree/HEAD/docs/user/network/dash-evo-tool , published at https://docs.dash.org/en/stable/docs/user/network/dash-evo-tool/ + ### Core Module Structure - **app.rs** - `AppState`: owns all screens, polls task results each frame, dispatches to visible screen diff --git a/Cargo.lock b/Cargo.lock index dfe01d188..d1a8f4e41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -199,7 +199,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef6978589202a00cd7e118380c448a08b6ed394c3a8df3a430d0898e3a42d046" dependencies = [ "android-properties", - "bitflags 2.10.0", + "bitflags 2.11.0", "cc", "cesu8", "jni", @@ -306,9 +306,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ded5f9a03ac8f24d1b8a25101ee812cd32cdc8c50a4c50237de2c4915850e73" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" dependencies = [ "rustversion", ] @@ -477,9 +477,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.3" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" dependencies = [ 
"async-task", "concurrent-queue", @@ -581,7 +581,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -642,7 +642,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -855,7 +855,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.115", + "syn 2.0.116", "which 4.4.2", ] @@ -943,9 +943,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" dependencies = [ "serde_core", ] @@ -1127,7 +1127,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1157,7 +1157,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b99da2f8558ca23c71f4fd15dc57c906239752dd27ff3c00a1d56b685b7cbfec" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "log", "polling", "rustix 0.38.44", @@ -1167,11 +1167,11 @@ dependencies = [ [[package]] name = "calloop" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9f6e1368bd4621d2c86baa7e37de77a938adf5221e5dd3d6133340101b309e" +checksum = "4dbf9978365bac10f54d1d4b04f7ce4427e51f71d61f2fe15e3fed5166474df7" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "polling", "rustix 1.1.3", "slab", @@ -1196,7 +1196,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"138efcf0940a02ebf0cc8d1eff41a1682a46b431630f4c52450d6265876021fa" dependencies = [ - "calloop 0.14.3", + "calloop 0.14.4", "rustix 1.1.3", "wayland-backend", "wayland-client", @@ -1364,9 +1364,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.58" +version = "4.5.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806" +checksum = "c5caf74d17c3aec5495110c34cc3f78644bfa89af6c8993ed4de2790e49b6499" dependencies = [ "clap_builder", "clap_derive", @@ -1374,9 +1374,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.58" +version = "4.5.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2" +checksum = "370daa45065b80218950227371916a1633217ae42b2715b2287b606dcd618e24" dependencies = [ "anstream", "anstyle", @@ -1393,7 +1393,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1539,7 +1539,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.10.1", "libc", ] @@ -1711,7 +1711,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1768,7 +1768,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1779,7 +1779,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1804,7 +1804,7 @@ dependencies = [ "base64 0.22.1", "bincode 2.0.1", "bip39", - "bitflags 2.10.0", + "bitflags 2.11.0", "cbc", "chrono", "chrono-humanize", @@ -1862,7 
+1862,7 @@ dependencies = [ [[package]] name = "dash-network" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "bincode 2.0.1", "bincode_derive", @@ -1877,7 +1877,7 @@ source = "git+https://github.com/dashpay/platform?rev=d6f4eb9ac9feafaa914f06e1b7 dependencies = [ "heck", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -1917,7 +1917,7 @@ dependencies = [ [[package]] name = "dash-spv" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "anyhow", "async-trait", @@ -1950,7 +1950,7 @@ dependencies = [ [[package]] name = "dashcore" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "anyhow", "base64-compat", @@ -1976,12 +1976,12 @@ dependencies = [ [[package]] name = "dashcore-private" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" [[package]] name = "dashcore-rpc" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = 
"git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "dashcore-rpc-json", "hex", @@ -1994,7 +1994,7 @@ dependencies = [ [[package]] name = "dashcore-rpc-json" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "bincode 2.0.1", "dashcore", @@ -2009,7 +2009,7 @@ dependencies = [ [[package]] name = "dashcore_hashes" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "bincode 2.0.1", "dashcore-private", @@ -2122,7 +2122,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2132,7 +2132,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2161,7 +2161,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "unicode-xid", ] @@ -2175,7 +2175,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2242,7 +2242,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.6.2", "libc", "objc2 0.6.3", @@ -2256,7 
+2256,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2511,7 +2511,7 @@ checksum = "6a9b567d356674e9a5121ed3fedfb0a7c31e059fe71f6972b691bcd0bfc284e3" dependencies = [ "accesskit", "ahash", - "bitflags 2.10.0", + "bitflags 2.11.0", "emath", "epaint", "log", @@ -2707,7 +2707,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2727,7 +2727,7 @@ checksum = "685adfa4d6f3d765a26bc5dbc936577de9abf756c1feeb3089b01dd395034842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2747,7 +2747,7 @@ checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2768,7 +2768,7 @@ checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2779,7 +2779,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -2949,7 +2949,7 @@ checksum = "a0aca10fb742cb43f9e7bb8467c91aa9bcb8e3ffbc6a6f7389bb93ffc920577d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -3086,7 +3086,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -3130,9 +3130,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = 
"8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -3145,9 +3145,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -3155,15 +3155,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -3172,9 +3172,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" @@ -3191,32 +3191,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] name = 
"futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -3226,7 +3226,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -3369,7 +3368,7 @@ version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12124de845cacfebedff80e877bb37b5b75c34c5a4c89e47e1cdd67fb6041325" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg_aliases", "cgl", "dispatch2", @@ -3435,7 +3434,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "gpu-alloc-types", ] @@ -3445,7 +3444,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -3466,7 +3465,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b89c83349105e3732062a895becfc71a8f921bb71ecbbdd8ff99263e3b53a0ca" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "gpu-descriptor-types", "hashbrown 0.15.5", ] @@ -3477,7 +3476,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdf242682df893b86f33a73828fb09ca4b2d3bb6cc95249707fc684d27484b91" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -4149,7 +4148,7 @@ dependencies = [ "byteorder-lite", "moxcms", "num-traits", - "png 0.18.0", + "png 0.18.1", "tiff", "zune-core 0.5.1", "zune-jpeg 0.5.12", @@ -4307,7 +4306,7 @@ checksum = "f7946b4325269738f270bb55b3c19ab5c5040525f83fd625259422a9d25d9be5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -4375,9 +4374,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -4385,14 +4384,14 @@ dependencies = [ [[package]] name = "key-wallet" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = "git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "async-trait", "base58ck", "bincode 2.0.1", "bincode_derive", "bip39", - "bitflags 2.10.0", + "bitflags 2.11.0", "dash-network", "dashcore", "dashcore-private", @@ -4412,7 +4411,7 @@ dependencies = [ [[package]] name = "key-wallet-manager" version = "0.42.0" -source = "git+https://www.github.com/dashpay/rust-dashcore?branch=v0.42-dev#ab4aef885e7ddf8c7fc40aaabf7d29d6eeb7c2e1" +source = 
"git+https://www.github.com/dashpay/rust-dashcore?rev=6affdaa5db30c04f533cfac4a81b9939d1cf2545#6affdaa5db30c04f533cfac4a81b9939d1cf2545" dependencies = [ "async-trait", "bincode 2.0.1", @@ -4536,7 +4535,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "libc", "redox_syscall 0.7.1", ] @@ -4645,9 +4644,9 @@ checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memmap2" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" dependencies = [ "libc", ] @@ -4679,7 +4678,7 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00c15a6f673ff72ddcc22394663290f870fb224c1bfce55734a75c414150e605" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block", "core-graphics-types 0.2.0", "foreign-types 0.5.0", @@ -4794,7 +4793,7 @@ checksum = "066cf25f0e8b11ee0df221219010f213ad429855f57c494f995590c861a9a7d8" dependencies = [ "arrayvec", "bit-set 0.8.0", - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg-if", "cfg_aliases", "codespan-reporting", @@ -4838,17 +4837,17 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cdede44f9a69cab2899a2049e2c3bd49bf911a157f6a3353d4a91c61abbce44" +checksum = "9d5d26952a508f321b4d3d2e80e78fc2603eaefcdf0c30783867f19586518bdc" dependencies = [ "libc", "log", "openssl", - "openssl-probe 0.1.6", + "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] @@ -4859,7 +4858,7 @@ version = 
"0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "jni-sys", "log", "ndk-sys", @@ -4889,7 +4888,7 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225e7cfe711e0ba79a68baeddb2982723e4235247aefce1482f2f16c27865b66" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg-if", "cfg_aliases", "libc", @@ -4980,7 +4979,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5075,7 +5074,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5118,7 +5117,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "libc", "objc2 0.5.2", @@ -5134,7 +5133,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d49e936b501e5c5bf01fda3a9452ff86dc3ea98ad5f283e1455153142d97518c" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.6.2", "objc2 0.6.3", "objc2-core-foundation", @@ -5148,7 +5147,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74dd3b56391c7a0596a295029734d3c1c5e7e510a4cb30245f8221ccea96b009" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "objc2 0.5.2", "objc2-core-location", @@ -5172,7 +5171,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "objc2 0.5.2", 
"objc2-foundation 0.2.2", @@ -5184,7 +5183,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "dispatch2", "objc2 0.6.3", ] @@ -5195,7 +5194,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e022c9d066895efa1345f8e33e584b9f958da2fd4cd116792e15e07e4720a807" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "dispatch2", "objc2 0.6.3", "objc2-core-foundation", @@ -5238,7 +5237,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "dispatch", "libc", @@ -5251,7 +5250,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3e0adef53c21f888deb4fa59fc59f7eb17404926ee8a6f59f5df0fd7f9f3272" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "objc2 0.6.3", "objc2-core-foundation", ] @@ -5262,7 +5261,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180788110936d59bab6bd83b6060ffdfffb3b922ba1396b312ae795e1de9d81d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "objc2 0.6.3", "objc2-core-foundation", ] @@ -5285,7 +5284,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "objc2 0.5.2", "objc2-foundation 0.2.2", @@ -5297,7 +5296,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", 
"objc2 0.5.2", "objc2-foundation 0.2.2", @@ -5320,7 +5319,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8bb46798b20cd6b91cbd113524c490f1686f4c4e8f49502431415f3512e2b6f" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "objc2 0.5.2", "objc2-cloud-kit", @@ -5352,7 +5351,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76cfcbf642358e8689af64cee815d139339f3ed8ad05103ed5eaf73db8d84cb3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "objc2 0.5.2", "objc2-core-location", @@ -5387,7 +5386,7 @@ version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -5404,15 +5403,9 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" @@ -5602,7 +5595,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "unicase", ] @@ -5639,7 +5632,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5697,7 +5690,7 @@ source = "git+https://github.com/dashpay/platform?rev=d6f4eb9ac9feafaa914f06e1b7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "virtue 0.0.17", ] @@ -5739,7 +5732,7 @@ source = "git+https://github.com/dashpay/platform?rev=d6f4eb9ac9feafaa914f06e1b7 dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5757,11 +5750,11 @@ dependencies = [ [[package]] name = "png" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97baced388464909d42d89643fe4361939af9b7ce7a31ee32a168f832a70f2a0" +checksum = "60769b8b31b2a9f263dae2776c37b1b28ae246943cf719eb6946a1db05128a61" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "crc32fast", "fdeflate", "flate2", @@ -5852,7 +5845,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5916,7 +5909,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.115", + "syn 2.0.116", "tempfile", ] @@ -5930,7 +5923,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -5948,7 +5941,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "memchr", "unicase", ] @@ -6153,7 +6146,7 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -6197,7 +6190,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -6206,7 +6199,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" dependencies = [ - "bitflags 2.10.0", + 
"bitflags 2.11.0", ] [[package]] @@ -6420,7 +6413,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db09040cc89e461f1a265139777a2bde7f8d8c67c4936f700c63ce3e2904d468" dependencies = [ "base64 0.22.1", - "bitflags 2.10.0", + "bitflags 2.11.0", "serde", "serde_derive", "unicode-ident", @@ -6494,7 +6487,7 @@ version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -6523,7 +6516,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.115", + "syn 2.0.116", "walkdir", ] @@ -6564,7 +6557,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -6577,7 +6570,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys 0.11.0", @@ -6606,10 +6599,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -6637,7 +6630,7 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.5.1", + "security-framework", "security-framework-sys", "webpki-root-certs", "windows-sys 0.61.2", @@ -6673,7 +6666,7 @@ version = "0.20.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fd3c7c96f8a08ee34eff8857b11b49b07d71d1c3f4e88f8a88d4c9e9f90b1702" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "bytemuck", "core_maths", "log", @@ -6771,24 +6764,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" dependencies = [ - "bitflags 2.10.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" -dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -6797,9 +6777,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" dependencies = [ "core-foundation-sys", "libc", @@ -6857,7 +6837,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -6882,7 +6862,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -6931,7 +6911,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -7060,7 +7040,7 @@ version = "0.19.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3457dea1f0eb631b4034d61d4d8c32074caa6cd1ab2d59f2327bd8461e2c0016" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "calloop 0.13.0", "calloop-wayland-source 0.3.0", "cursor-icon", @@ -7085,8 +7065,8 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0512da38f5e2b31201a93524adb8d3136276fa4fe4aafab4e1f727a82b534cc0" dependencies = [ - "bitflags 2.10.0", - "calloop 0.14.3", + "bitflags 2.11.0", + "calloop 0.14.4", "calloop-wayland-source 0.4.1", "cursor-icon", "libc", @@ -7158,7 +7138,7 @@ version = "0.3.0+sdk-1.3.268.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eda41003dc44290527a59b13432d4a0379379fa074b70174882adfbdfd917844" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -7249,7 +7229,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -7290,9 +7270,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.115" +version = "2.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e614ed320ac28113fa64972c4262d5dbc89deacdfd00c34a3e4cea073243c12" +checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" dependencies = [ "proc-macro2", "quote", @@ -7316,7 +7296,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -7325,7 +7305,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -7466,7 +7446,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.115", + "syn 2.0.116", ] [[package]] @@ -7477,7 +7457,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -7630,7 +7610,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -7756,18 +7736,18 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.8+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0742ff5ff03ea7e67c8ae6c93cac239e0d9784833362da3f9a9c1da8dfefcbdc" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow 0.7.14", ] [[package]] name = "tonic" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a" +checksum = "7f32a6f80051a4111560201420c7885d0082ba9efe2ab61875c587bb6b18b9a0" dependencies = [ "async-trait", "base64 0.22.1", @@ -7796,21 +7776,21 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27aac809edf60b741e2d7db6367214d078856b8a5bff0087e94ff330fb97b6fc" +checksum = "ce6d8958ed3be404120ca43ffa0fb1e1fc7be214e96c8d33bd43a131b6eebc9e" dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] name = "tonic-prost" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c55a2d6a14174563de34409c9f92ff981d006f56da9c6ecd40d9d4a31500b0" +checksum = "9f86539c0089bfd09b1f8c0ab0239d80392af74c21bc9e0f15e1b4aca4c1647f" dependencies = [ "bytes", "prost", @@ -7819,16 +7799,16 @@ dependencies = [ [[package]] name = "tonic-prost-build" 
-version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4556786613791cfef4ed134aa670b61a85cfcacf71543ef33e8d801abae988f" +checksum = "65873ace111e90344b8973e94a1fc817c924473affff24629281f90daed1cd2e" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "prost-types", "quote", - "syn 2.0.115", + "syn 2.0.116", "tempfile", "tonic-build", ] @@ -7883,7 +7863,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "bytes", "futures-util", "http", @@ -7939,7 +7919,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -8075,9 +8055,9 @@ checksum = "ce61d488bcdc9bc8b5d1772c404828b17fc481c0a582b5581e95fb233aef503e" [[package]] name = "unicode-ident" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-normalization" @@ -8241,11 +8221,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.4", + "getrandom 0.4.1", "js-sys", "serde_core", "wasm-bindgen", @@ -8432,7 +8412,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "wasm-bindgen-shared", ] @@ -8499,7 +8479,7 @@ version = "0.244.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "hashbrown 0.15.5", "indexmap 2.13.0", "semver", @@ -8525,7 +8505,7 @@ version = "0.31.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e6faa537fbb6c186cb9f1d41f2f811a4120d1b57ec61f50da451a0c5122bec" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "rustix 1.1.3", "wayland-backend", "wayland-scanner", @@ -8537,7 +8517,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "625c5029dbd43d25e6aa9615e88b829a5cad13b2819c4ae129fdbb7c31ab4c7e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cursor-icon", "wayland-backend", ] @@ -8559,7 +8539,7 @@ version = "0.32.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baeda9ffbcfc8cd6ddaade385eaf2393bd2115a69523c735f12242353c3df4f3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "wayland-backend", "wayland-client", "wayland-scanner", @@ -8571,7 +8551,7 @@ version = "20250721.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40a1f863128dcaaec790d7b4b396cc9b9a7a079e878e18c47e6c2d2c5a8dcbb1" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "wayland-backend", "wayland-client", "wayland-protocols", @@ -8584,7 +8564,7 @@ version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791c58fdeec5406aa37169dd815327d1e47f334219b523444bc26d70ceb4c34e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "wayland-backend", "wayland-client", "wayland-protocols", @@ -8597,7 +8577,7 @@ version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa98634619300a535a9a97f338aed9a5ff1e01a461943e8346ff4ae26007306b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "wayland-backend", 
"wayland-client", "wayland-protocols", @@ -8610,7 +8590,7 @@ version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9597cdf02cf0c34cd5823786dce6b5ae8598f05c2daf5621b6e178d4f7345f3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "wayland-backend", "wayland-client", "wayland-protocols", @@ -8717,7 +8697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfe68bac7cde125de7a731c3400723cadaaf1703795ad3f4805f187459cd7a77" dependencies = [ "arrayvec", - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg-if", "cfg_aliases", "document-features", @@ -8748,7 +8728,7 @@ dependencies = [ "arrayvec", "bit-set 0.8.0", "bit-vec 0.8.0", - "bitflags 2.10.0", + "bitflags 2.11.0", "bytemuck", "cfg_aliases", "document-features", @@ -8808,7 +8788,7 @@ dependencies = [ "arrayvec", "ash", "bit-set 0.8.0", - "bitflags 2.10.0", + "bitflags 2.11.0", "block", "bytemuck", "cfg-if", @@ -8853,7 +8833,7 @@ version = "27.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afdcf84c395990db737f2dd91628706cb31e86d72e53482320d368e52b5da5eb" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "bytemuck", "js-sys", "log", @@ -9023,7 +9003,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -9034,7 +9014,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -9045,7 +9025,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -9056,7 +9036,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -9462,7 +9442,7 @@ 
dependencies = [ "ahash", "android-activity", "atomic-waker", - "bitflags 2.10.0", + "bitflags 2.11.0", "block2 0.5.1", "bytemuck", "calloop 0.13.0", @@ -9610,7 +9590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -9691,7 +9671,7 @@ dependencies = [ "heck", "indexmap 2.13.0", "prettyplease", - "syn 2.0.115", + "syn 2.0.116", "wasm-metadata", "wit-bindgen-core", "wit-component", @@ -9707,7 +9687,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "wit-bindgen-core", "wit-bindgen-rust", ] @@ -9719,7 +9699,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags 2.10.0", + "bitflags 2.11.0", "indexmap 2.13.0", "log", "serde", @@ -9822,7 +9802,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d039de8032a9a8856a6be89cea3e5d12fdd82306ab7c94d74e6deab2460651c5" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "dlib", "log", "once_cell", @@ -9866,7 +9846,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "synstructure", ] @@ -9923,7 +9903,7 @@ checksum = "10da05367f3a7b7553c8cdf8fa91aee6b64afebe32b51c95177957efc47ca3a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "zbus-lockstep", "zbus_xml", "zvariant", @@ -9938,7 +9918,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "zbus_names", "zvariant", "zvariant_utils", @@ -9984,7 +9964,7 @@ checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 
2.0.116", ] [[package]] @@ -10004,7 +9984,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "synstructure", ] @@ -10026,7 +10006,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] @@ -10096,14 +10076,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", ] [[package]] name = "zip" -version = "7.4.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc12baa6db2b15a140161ce53d72209dacea594230798c24774139b54ecaa980" +checksum = "c42e33efc22a0650c311c2ef19115ce232583abbe80850bc8b66509ebef02de0" dependencies = [ "crc32fast", "flate2", @@ -10213,7 +10193,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.115", + "syn 2.0.116", "zvariant_utils", ] @@ -10226,7 +10206,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.115", + "syn 2.0.116", "winnow 0.7.14", ] diff --git a/Cargo.toml b/Cargo.toml index 89326e918..8f2313ae1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,15 +91,19 @@ egui_kittest = { version = "0.33.3", features = ["eframe"] } winres = "0.1" [patch."https://github.com/dashpay/rust-dashcore"] -dash-network = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -dash-spv = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -dashcore = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -dashcore-private = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -dashcore-rpc = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -dashcore-rpc-json = { git = "https://www.github.com/dashpay/rust-dashcore", branch = 
"v0.42-dev" } -dashcore_hashes = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -key-wallet = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } -key-wallet-manager = { git = "https://www.github.com/dashpay/rust-dashcore", branch = "v0.42-dev" } +dash-network = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dash-spv = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dashcore = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dashcore-private = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dashcore-rpc = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dashcore-rpc-json = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +dashcore_hashes = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +key-wallet = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } +key-wallet-manager = { git = "https://www.github.com/dashpay/rust-dashcore", rev = "6affdaa5db30c04f533cfac4a81b9939d1cf2545" } + +[lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = ["cfg(tokio_unstable)"] [lints.clippy] uninlined_format_args = "allow" diff --git a/docs/ai-design/2026-02-16-spv-single-runtime-refactor/architecture.md b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/architecture.md new file mode 100644 index 000000000..1cb2c50ad --- /dev/null +++ b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/architecture.md @@ -0,0 +1,410 @@ +# SPV Single-Runtime Architecture + +## 1. 
Overview + +This document describes the refactoring of `SpvManager` to eliminate the dedicated OS thread and +secondary 4-worker tokio runtime, moving all SPV operations onto the application's existing +12-worker multi-thread tokio runtime created in `main.rs`. + +### Current Architecture + +``` +main() ──► 12-worker tokio runtime ──► block_on(start()) ──► eframe UI loop + ├── AppContext backend tasks (tokio::spawn) + ├── reconcile / finality listeners + └── spv_wallet_load / unload + +SpvManager::start() ──► std::thread("spv") ──► NEW 4-worker tokio runtime + └── block_on(run_spv_loop) + ├── build_client / start / monitor_network + └── event handlers (spawn_sync → tokio::spawn on SPV runtime) +``` + +### Target Architecture + +``` +main() ──► 12-worker tokio runtime ──► block_on(start()) ──► eframe UI loop + ├── AppContext backend tasks (tokio::spawn) + ├── reconcile / finality listeners + ├── spv_wallet_load / unload + └── SpvManager::start() ──► tokio::spawn(run_spv_loop) + ├── build_client / start / monitor_network + └── event handlers (spawn_sync → tokio::spawn on SAME runtime) +``` + +### Key Insight + +The separate OS thread + runtime was originally added as a resource-isolation measure ("ensures SPV +sync doesn't compete with UI thread resources"). However, this isolation provides minimal benefit +because: + +1. The UI event loop itself runs on the main tokio runtime via `block_on(start())`, which means + the main runtime already handles significant I/O work from backend tasks. +2. The 12-worker pool has ample capacity for SPV's network I/O (peer connections, block filter + downloads, header sync). +3. CPU-intensive operations (if any) in the SPV client should use `spawn_blocking`, not a separate + runtime. +4. Having two runtimes complicates shutdown coordination, cross-runtime channel communication, + and debugging. + +## 2. 
Detailed Design + +### 2.1 SpvManager::start() — Replace Thread + Runtime with tokio::spawn + +**Current** (`src/spv/manager.rs:376-442`): + +```rust +pub fn start(self: &Arc<Self>, expected_wallet_count: usize) -> Result<(), String> { + // ... status checks, token setup ... + std::thread::Builder::new() + .name("spv".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(4) + .enable_all() + .thread_name("spv-rt") + .build() + .expect("Failed to create SPV runtime"); + rt.block_on(async move { + // ... run_spv_loop ... + }); + }) + .map_err(|e| format!("Failed to spawn SPV thread: {e}"))?; + Ok(()) +} +``` + +**Proposed**: + +```rust +pub fn start(self: &Arc<Self>, expected_wallet_count: usize) -> Result<(), String> { + // ... status checks, token setup (unchanged) ... + + let manager = Arc::clone(self); + let global_cancel = self.subtasks.cancellation_token.clone(); + + // Spawn on the existing main tokio runtime instead of creating a new one + self.subtasks.spawn_sync("spv_main_loop", async move { + let manager_for_loop = Arc::clone(&manager); + if let Err(err) = manager_for_loop + .run_spv_loop(stop_token, global_cancel, expected_wallet_count) + .await + { + tracing::error!(error = %err, network = ?manager.network, "SPV runtime failed"); + if let Err(e) = manager.write_last_error(Some(err.clone())) { + tracing::error!("Failed to write SPV error: {}", e); + } + if let Err(e) = manager.write_status(SpvStatus::Error) { + tracing::error!("Failed to write SPV status: {}", e); + } + } + + // Clean up on exit + if let Ok(mut guard) = manager.stop_token.lock() { + *guard = None; + } + }); + + Ok(()) +} +``` + +**Rationale**: `run_spv_loop` is an async function. `DashSpvClient::monitor_network` is async +and uses `tokio::select!` internally. All I/O operations (peer connections, message send/recv) +use tokio's networking primitives, so they naturally work on any tokio multi-thread runtime. 
+There is no requirement for a dedicated runtime. + +### 2.2 Event Handler Spawning — No Changes Required + +The event handlers (`spawn_sync_event_handler`, `spawn_wallet_event_handler`, +`spawn_network_event_handler`, `spawn_progress_watcher`, `spawn_request_handler`) all use +`self.subtasks.spawn_sync(name, future)` which calls `tokio::spawn`. + +Currently, because these are called from within `run_spv_loop` which runs inside the SPV +runtime's `block_on`, `tokio::spawn` targets the SPV runtime. After the refactoring, +`run_spv_loop` will run as a task on the main runtime, so `tokio::spawn` will correctly +target the main runtime. **No code changes needed** in these methods. + +### 2.3 get_quorum_public_key — No Changes Required + +This method at `src/spv/manager.rs:616-675` uses: + +```rust +tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { ... }) +}) +``` + +This pattern works correctly on any multi-thread tokio runtime. `block_in_place` temporarily +converts the current worker thread into a blocking thread, and `Handle::current()` gets the +handle of whatever runtime is active. Since this method is called from contexts that already +have a tokio runtime handle (either from the SPV context provider or from backend tasks), +**no changes needed**. + +Important: `block_in_place` requires a multi-thread runtime. The main runtime is multi-thread +(12 workers), so this is safe. + +### 2.4 Shutdown — Simplified + +**Current shutdown flow**: +1. `AppContext::stop_spv()` → `SpvManager::stop()` → cancels `stop_token` +2. `stop_token` cancellation triggers the `tokio::select!` in `run_sync_and_monitor` +3. `monitor_cancel.cancel()` stops the network monitor +4. `client.stop().await` cleans up +5. The SPV runtime's `block_on` returns, the OS thread exits +6. App `on_exit` → `subtasks.shutdown()` → cancels global token, joins tasks + +**Problem with current**: `subtasks.shutdown()` only joins tasks on the main runtime's +`JoinSet`. 
The SPV thread + runtime are completely independent and not tracked. If SPV +shutdown hangs, the app has no visibility or control. + +**After refactoring**: +1. `SpvManager::stop()` → cancels `stop_token` (unchanged) +2. `run_spv_loop` is now a task in the main `TaskManager`'s `JoinSet` +3. `subtasks.shutdown()` cancels the global token AND waits for all tasks including SPV +4. Single unified shutdown path with timeout and diagnostics + +This is a **correctness improvement**: the SPV loop is now properly tracked by the +`TaskManager` and will appear in shutdown diagnostics if it hangs. + +### 2.5 DashSpvClient Ownership and Send Safety + +`DashSpvClient` contains a `SyncManager` that is not behind `Arc<Mutex<…>>` (by design — +see the doc comment at `client/core.rs:110-133`). The client uses `&mut self` for +`monitor_network`, `start`, and `stop`. + +The client is created, started, monitored, and stopped all within a single `async` block +in `run_spv_loop` → `run_sync_and_monitor`. It is never shared across tasks. This pattern +is compatible with `tokio::spawn` as long as `DashSpvClient` is `Send`. + +**Verification**: `DashSpvClient<W, PeerNetworkManager, DiskStorageManager>` (where `W` is the wallet manager type) +must be `Send` for the future returned by `run_spv_loop` to be `Send` (required by +`tokio::spawn`). The struct fields are: +- `config: ClientConfig` — owned data, `Send` +- `state: Arc<RwLock<…>>` — `Send + Sync` +- `network: PeerNetworkManager` — must be `Send` +- `storage: Arc<Mutex<…>>` — `Send + Sync` +- `wallet: Arc<RwLock<Option<…>>>` — `Send + Sync` (already shared via `Arc`) +- `sync_manager: SyncManager` — must be `Send` +- Various channels (mpsc senders/receivers) — `Send` + +If this compiles currently (which it does, since the SPV thread creates and uses the client +within `block_on`), the types are `Send`. The compiler will verify this at the call site +when we change to `tokio::spawn`. + +### 2.6 TaskManager Interaction + +The `SpvManager` receives a `subtasks: Arc<TaskManager>` in its constructor. 
This `TaskManager` +is the **same instance** shared by `AppContext` and `AppState`. Currently: + +- `SpvManager` uses `self.subtasks.spawn_sync()` for event handlers → spawns on SPV runtime +- `AppContext` uses `self.subtasks.spawn_sync()` for reconcile/finality listeners → spawns on main runtime + +After refactoring, **all** `spawn_sync` calls target the main runtime. The `TaskManager`'s +`JoinSet` will contain all SPV-related tasks in one place. This simplifies shutdown and +gives consistent task tracking. + +## 3. File-by-File Change List + +### `src/spv/manager.rs` + +**Change 1: `SpvManager::start()` (lines 376-442)** + +Remove the `std::thread::Builder` + `tokio::runtime::Builder` block. Replace with +`self.subtasks.spawn_sync("spv_main_loop", async move { ... })`. + +The async block body is identical to what was inside `rt.block_on(async move { ... })`. + +**Change 2: Remove unused imports** + +After removing the thread/runtime code, the following imports become unused and should +be removed: +- No specific runtime-related imports are used directly (the runtime builder was inline). + +**No other changes needed in this file.** The `run_spv_loop`, `run_sync_and_monitor`, +event handler spawning methods, `build_client`, `stop`, `get_quorum_public_key`, +wallet management methods, and all helper methods remain unchanged. + +### `src/main.rs` + +**No changes.** The 12-worker runtime stays as-is. + +### `src/utils/tasks.rs` + +**No changes.** `TaskManager` and `spawn_sync` work correctly. The SPV main loop +task will now appear in the `JoinSet` alongside other tasks. + +### `src/context/wallet_lifecycle.rs` + +**No changes.** `start_spv()` calls `self.spv_manager.start(expected_wallets)` which +now spawns a task instead of a thread. The calling code doesn't need to know. + +### `src/app.rs` + +**No changes.** Shutdown via `self.subtasks.shutdown()` now automatically covers +the SPV loop since it's in the same `JoinSet`. 
+ +### `src/spv/mod.rs` + +**No changes.** + +### `src/spv/error.rs` + +**No changes.** + +## 4. Deadlock and Concurrency Risk Analysis + +### 4.1 Risk: TaskManager JoinSet Lock Contention + +**Concern**: `TaskManager::spawn_sync` acquires a `tokio::sync::Mutex` on the `JoinSet` +to add tasks. The SPV event handlers spawn tasks frequently. Could this cause contention? + +**Assessment**: LOW RISK. The lock is held only for the duration of inserting into the +`JoinSet`, which is O(1). The `tokio::sync::Mutex` is fair and async-aware, so it won't +block worker threads. This is the same pattern used today for all other background tasks. + +### 4.2 Risk: SPV Competing for Worker Threads + +**Concern**: SPV's `monitor_network` and event handlers now share worker threads with +UI backend tasks. Could SPV monopolize workers? + +**Assessment**: LOW RISK. SPV operations are predominantly I/O-bound (network reads/writes, +disk storage). Tokio's work-stealing scheduler distributes I/O tasks efficiently across +12 workers. The SPV client uses standard tokio I/O primitives (`TcpStream`, channels, +`select!`), so it cooperatively yields. If CPU-intensive processing occurs (e.g., header +validation), it happens in small bursts between awaits. + +**Mitigation** (if needed later, not recommended initially): Use `tokio::task::spawn_blocking` +for any CPU-heavy SPV operations. This is a targeted fix, not a blanket approach. + +### 4.3 Risk: Shutdown Ordering + +**Concern**: `TaskManager::shutdown()` cancels the global `CancellationToken` and then +joins all tasks. SPV event handlers check `cancel.cancelled()` in their `tokio::select!` +loops. Could the SPV main loop and its child handlers deadlock during shutdown? + +**Assessment**: LOW RISK. The shutdown sequence is: +1. Global cancel token fires +2. All event handlers break out of their loops (they all check `cancel.cancelled()`) +3. 
`run_sync_and_monitor` detects `global_cancel.cancelled()`, cancels monitor, calls `client.stop()` +4. `run_spv_loop` cleans up shared state (interface, storage, etc.) +5. All tasks complete and are joined + +The event handlers are independent tokio tasks. They don't hold any locks that the main +loop needs during cleanup. The main loop clears shared state (`client_interface`, +`network_manager`, etc.) after the client stops, which is safe because the handlers have +already exited. + +### 4.4 Risk: `block_in_place` in `get_quorum_public_key` + +**Concern**: `block_in_place` temporarily converts a worker thread to blocking mode. +With a shared runtime, this reduces available workers from 12 to 11 temporarily. + +**Assessment**: LOW RISK. This operation is infrequent (only during quorum key lookups) +and short-lived (a single async channel send + receive). Losing one worker temporarily +out of 12 has negligible impact. This is actually the intended use case for `block_in_place`. + +### 4.5 Risk: Cross-Task Data Races on Shared State + +**Concern**: `SpvManager` uses `Arc<RwLock<…>>` (std) and `Arc<RwLock<…>>` (tokio) +for shared state. Moving to a single runtime doesn't change the sharing pattern. + +**Assessment**: NO RISK. The synchronization primitives are the same. The data flows +through the same channels. The only difference is which runtime executes the tasks, not +how they synchronize. + +### 4.6 Risk: `DashSpvClient` and `!Send` / `!Sync` Concerns + +**Concern**: If `DashSpvClient` or the future returned by `run_spv_loop` is not `Send`, +`tokio::spawn` will fail to compile. + +**Assessment**: COMPILE-TIME CHECK. The Rust compiler will catch this immediately. If +the types are `Send` (which they must be since they currently work in `block_on` on a +multi-thread runtime), they will work with `tokio::spawn`. If for some reason a type +is `!Send`, the compiler error will pinpoint the exact field/type. 
+ +### 4.7 Risk: Reconcile/Finality Listeners Running on Wrong Runtime + +**Concern**: Currently, reconcile and finality listeners are spawned by `AppContext` +on the main runtime. SPV event handlers are spawned on the SPV runtime. After the +change, both are on the main runtime. + +**Assessment**: POSITIVE CHANGE. This eliminates a subtle cross-runtime timing issue. +Previously, reconcile signals sent from the SPV runtime's event handlers to the main +runtime's listeners had to cross runtime boundaries via tokio mpsc channels. Now they're +all on the same runtime, which is slightly more efficient and easier to reason about. + +## 5. Migration Path + +This refactoring is a single atomic change because the modifications are minimal: + +### Step 1: Modify `SpvManager::start()` (the only code change) + +Replace the thread + runtime block with `self.subtasks.spawn_sync(...)`. This is a +~15 line change in a single method. + +### Step 2: Compile and verify `Send` bounds + +Run `cargo build`. If any type is `!Send`, the compiler will report it. This is the +primary risk gate. + +### Step 3: Test SPV functionality + +1. Start the app with SPV enabled +2. Verify SPV sync starts and progresses (status transitions: Starting → Syncing → Running) +3. Verify wallet reconciliation works (balances update after sync) +4. Verify clean shutdown (no hanging tasks in shutdown logs) +5. Verify stop/restart SPV from UI works correctly +6. Verify quorum key lookups work (identity operations) + +### Step 4: Verify shutdown diagnostics + +1. Start app with SPV running +2. Close the app +3. Check logs for clean shutdown — the `spv_main_loop` task should appear in the + `TaskManager` shutdown trace alongside other tasks + +### Step 5: Run existing tests + +```bash +cargo test --all-features --workspace +cargo clippy --all-features --all-targets -- -D warnings +``` + +## 6. Open Questions + +### Q1: Should we remove the `subtasks` field from `SpvManager`? 
+ +Currently, `SpvManager` stores its own `Arc<TaskManager>` reference, which is the same +instance as `AppContext.subtasks`. This is correct and useful — the SPV manager's event +handlers are logically "subtasks" of the SPV lifecycle but need to be tracked for shutdown. + +**Recommendation**: Keep as-is. The `TaskManager` is shared, not duplicated. + +### Q2: Should `DashSpvClient` operations use `spawn_blocking` for CPU work? + +The SPV client does header validation, merkle proof verification, and compact block +filter matching. These are CPU operations but typically fast (microseconds to low +milliseconds per item). + +**Recommendation**: No. Keep as-is unless profiling shows that SPV CPU work causes +UI frame drops (> 16ms blocking on a worker thread). The 12-worker pool provides +sufficient headroom. + +### Q3: Should we adjust the worker thread count? + +Currently 12 workers. With SPV now sharing the pool, should we increase it? + +**Recommendation**: No. 12 workers is already generous for an egui desktop app. +SPV adds a handful of I/O-bound tasks (peer connections, event handlers). The +work-stealing scheduler handles this efficiently. Monitor and adjust only if +performance issues are observed. + +### Q4: What about the ZMQ listener (`CoreZMQListener`)? + +The `CoreZMQListener` in `src/components/core_zmq_listener.rs` also creates a +separate OS thread + runtime (line 308-310). This is a separate concern and should +be addressed in a follow-up refactoring if desired. + +**Recommendation**: Out of scope for this change. Address separately if unified +runtime is a broader goal. 
diff --git a/docs/ai-design/2026-02-16-spv-single-runtime-refactor/code-review.md b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/code-review.md new file mode 100644 index 000000000..76874c21f --- /dev/null +++ b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/code-review.md @@ -0,0 +1,186 @@ +# SPV Refactor Code Review: Single-Runtime Migration + +**Reviewer**: code-reviewer agent +**Date**: 2026-02-16 +**Branch**: `refactor/no-separate-spv-thread` +**Scope**: Elimination of separate SPV OS thread + tokio runtime; SPV now runs on the main 12-worker tokio runtime via `TaskManager::spawn_sync`. + +--- + +## Critical Issues (must fix before merge) + +**None identified.** + +The refactoring is minimal and well-targeted. The change from `std::thread::Builder::new("spv").spawn(|| { rt.block_on(...) })` to `self.subtasks.spawn_sync("spv_main_loop", async move { ... })` is mechanically sound and does not introduce new deadlock vectors. + +--- + +## Warnings (should fix) + +### W1. `block_in_place` + `block_on` in `get_quorum_public_key` — now riskier on shared runtime + +**File**: `src/spv/manager.rs:639-664` + +```rust +tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + interface.get_quorum_by_height(...).await + }) +}) +``` + +Previously this ran on the separate 4-worker SPV runtime, so blocking a worker there had no impact on the main UI task pipeline. Now it runs on the shared 12-worker runtime. `block_in_place` temporarily converts the current worker to a blocking thread and spawns a replacement, which is correct but: + +- If called frequently or if the quorum lookup is slow (network round-trip through the unbounded command channel), it can exhaust tokio worker threads temporarily. +- This is a sync method called from unknown contexts. If ever called from a tokio task that is not `block_in_place`-safe (e.g., from inside a `spawn_blocking` context), it will panic. 
+ +**Recommendation**: Consider making this method `async` and using `.await` directly, or document that it must only be called from a tokio worker context. The risk is low in practice given 12 workers and infrequent quorum lookups, but it is worth noting. + +### W2. `reconcile_spv_wallets` holds multiple lock layers simultaneously + +**File**: `src/context/wallet_lifecycle.rs:555-767` + +The reconciliation function acquires: +1. `self.spv_manager.wallet().read().await` (tokio RwLock, held for entire function body) +2. `self.wallets.read().unwrap()` (std RwLock, held for entire function body) +3. Individual `wallet_arc.write()` (std RwLock, nested inside #2's scope) +4. `self.db.*` operations (std Mutex on SQLite connection) + +This is a complex lock hierarchy. The key concern is that `self.wallets` is a `std::sync::RwLock` and it is acquired while an `await`-capable tokio lock is held. If any other code path acquires these locks in a different order, a deadlock could occur. + +**Assessment**: The lock ordering appears consistent across the codebase (SPV wallet lock first, then DET wallets lock, then individual wallet locks). However, the function is long and holds locks for an extended period. This is pre-existing code and not introduced by this refactoring, so it is a warning rather than a blocker. + +**Recommendation**: Consider breaking the reconciliation into smaller lock scopes, particularly releasing the tokio `wm` read lock before doing DB writes. + +### W3. `std::sync::Mutex` guards held across sync points in event handler setup + +**File**: `src/spv/manager.rs:1069-1070, 1142` + +```rust +let reconcile_tx = self.reconcile_tx.lock().ok().and_then(|g| g.clone()); +let finality_tx = self.finality_tx.lock().ok().and_then(|g| g.clone()); +``` + +These lines acquire `std::sync::Mutex` locks, clone the inner `Option`, and immediately drop the guard. This is correct -- the guards are not held across `.await` points. 
The cloned senders are then moved into the spawned async task. No issue here, but worth confirming the pattern is intentional (it is). + +--- + +## Informational Notes + +### I1. Lock inventory and ordering analysis + +The `SpvManager` struct uses a mix of synchronization primitives: + +| Field | Type | Usage | +|-------|------|-------| +| `status`, `last_error`, `started_at`, `sync_progress_state`, `progress_updated_at`, `det_wallets`, `connected_peers`, `client_interface`, `config` | `std::sync::RwLock` | Short-lived reads/writes, never held across `.await` | +| `storage`, `reconcile_tx`, `finality_tx`, `stop_token`, `request_tx` | `std::sync::Mutex` | Short-lived lock-clone-drop pattern, never held across `.await` | +| `wallet`, `network_manager` | `tokio::sync::RwLock` | Held across `.await` in async methods | + +**Lock ordering is consistent**: All `std::sync` locks are acquired, used briefly (read/clone/write a primitive), and dropped before any `.await`. The `tokio::sync::RwLock` instances (`wallet`, `network_manager`) are only held across `.await` in async contexts where this is expected. + +**No deadlock risk identified** from lock ordering. + +### I2. Channel capacity analysis + +| Channel | Type | Capacity | Risk | +|---------|------|----------|------| +| `reconcile_tx` | `tokio::sync::mpsc` | 64 | Low -- `try_send` used, drops on full | +| `finality_tx` | `tokio::sync::mpsc` | 64 | Low -- `try_send` used, drops on full | +| `request_tx` | `tokio::sync::mpsc` | 32 | Low -- only transaction broadcasts | +| `command_tx` (DashSpvClientInterface) | `tokio::sync::mpsc::unbounded` | Unbounded | See I3 | +| `sync_rx`, `wallet_rx`, `net_rx` | `tokio::sync::broadcast` | SDK-defined | Lagged events handled correctly | +| `progress_rx` | `tokio::sync::watch` | 1 (latest value) | Correct for progress updates | + +### I3. 
Unbounded channel for `DashSpvClientCommand` + +**File**: `src/spv/manager.rs:866` + +The unbounded channel is required by the SDK's `DashSpvClientInterface` API. Currently only one command type exists (`GetQuorumByHeight`), which is called infrequently (only during identity operations that need quorum validation). The comment at line 864-865 accurately documents this. + +**Risk**: Negligible in practice. The channel would only grow unbounded if quorum lookups are requested faster than the SPV monitor loop can process them, which is not a realistic scenario. + +### I4. Shutdown ordering is correct + +The shutdown flow works as follows: + +1. `AppState::on_exit()` calls `self.subtasks.shutdown()` (`src/app.rs:1133`) +2. `TaskManager::shutdown()` cancels the `CancellationToken` and joins all tasks with a 10-second timeout (`src/utils/tasks.rs:64-132`) +3. All SPV subtasks (`spv_main_loop`, `spv_request_handler`, `spv_progress_watcher`, `spv_sync_event_handler`, `spv_wallet_event_handler`, `spv_network_event_handler`) listen on either the global cancellation token or the local `stop_token` +4. The `spv_main_loop` task cancels `monitor_cancel` which causes `monitor_network` to exit, then calls `client.stop()`, then cleans up shared state + +Since all SPV tasks are now in the same `JoinSet` as the rest of the application tasks, the unified shutdown path in `TaskManager::shutdown()` covers them correctly. Previously the separate OS thread had its own shutdown path which was harder to coordinate. + +### I5. UI thread contention is minimal + +The UI thread calls `SpvManager::status()` (sync method, `src/spv/manager.rs:355-374`) which acquires several `std::sync::RwLock` read locks. These locks are: +- Uncontended in practice (writers are event handlers that do quick writes) +- Never held across `.await` by writers +- Read-preferring (multiple readers can proceed concurrently) + +No risk of UI thread stalling from SPV lock contention. + +### I6. 
+- **`DashSpvClient`**: Contains `Arc`-wrapped, lock-guarded components (e.g. `Arc<RwLock<…>>` / `Arc<Mutex<…>>` around the wallet, network, and storage managers — confirm exact wrappers against the SDK source). The struct itself is `Send + Sync` when its generic parameters are `Send + Sync`. With `WalletManager`, `PeerNetworkManager`, and `DiskStorageManager`, this holds.
+- **`DiskStorageManager`**: Wrapped in `Arc<Mutex<…>>` when stored in `DashSpvClient`. The inner type need only be `Send`, which it is (it wraps file handles and SQLite connections).
**Does not introduce deadlocks** -- the lock analysis confirms all `std::sync` locks are short-lived and never held across `.await` points + +The warnings (W1, W2) are pre-existing concerns that were not introduced by this refactoring and should be addressed separately. No changes are required for merge. diff --git a/docs/ai-design/2026-02-16-spv-single-runtime-refactor/security-audit.md b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/security-audit.md new file mode 100644 index 000000000..f3b5a0796 --- /dev/null +++ b/docs/ai-design/2026-02-16-spv-single-runtime-refactor/security-audit.md @@ -0,0 +1,298 @@ +# SPV Module Security Audit Report + +**Date**: 2026-02-16 +**Auditor**: Security Engineer (automated) +**Scope**: `src/spv/` module (manager.rs, error.rs, mod.rs) and related Cargo.toml dependencies +**Branch**: `refactor/no-separate-spv-thread` +**Commit**: `7f2e70c7` + +--- + +## Executive Summary + +The SPV module manages Simplified Payment Verification for the Dash Evo Tool desktop application. The recent refactoring moved the SPV client from a separate OS thread with its own tokio runtime to a spawned task on the main tokio runtime. This audit covers the entire SPV module with emphasis on cryptographic material handling, network security, resource exhaustion, and path traversal risks. + +**Overall Risk Assessment**: **Medium**. One high-severity finding related to cryptographic key material not being zeroized after use. Several medium and low findings related to path traversal, unbounded channels, and potential runtime blocking. No critical vulnerabilities found. 
+ +--- + +## Critical Findings + +*None identified.* + +--- + +## High Risk Findings + +### H-1: Extended Private Key String Not Zeroized After Use + +**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:699-711` +**Type**: CWE-316 (Cleartext Storage of Sensitive Information in Memory) +**CVSS Estimate**: 6.5 (High - local attacker with memory access) + +**Description**: In `load_wallet_from_seed()`, the `seed_bytes` array is correctly zeroized on both the success and error paths (lines 695 and 698). However, the `xprv_str` variable (line 699), which contains the Base58-encoded extended private key, is never zeroized. This string remains in heap memory until the allocator reuses the memory region. + +```rust +let xprv = ExtendedPrivKey::new_master(self.network, &seed_bytes).map_err(|e| { + seed_bytes.zeroize(); // Good: seed zeroized on error + format!("ExtendedPrivKey::new_master failed: {e}") +})?; +seed_bytes.zeroize(); // Good: seed zeroized on success +let xprv_str = xprv.to_string(); // BAD: String not zeroized after use + +let account_options = Self::default_account_creation_options(); + +let wallet_id = match wm.import_wallet_from_extended_priv_key(&xprv_str, account_options) { + Ok(id) => id, + Err(WalletError::WalletExists(id)) => id, + Err(err) => { + // xprv_str is dropped here without zeroization + return Err(format!("import_wallet_from_extended_priv_key failed: {err}")); + } +}; +// xprv_str is dropped later without zeroization +``` + +Additionally, the `ExtendedPrivKey` struct (`xprv`) itself likely contains the raw private key bytes in memory and is also not zeroized. Whether `ExtendedPrivKey` implements `Zeroize` on `Drop` depends on the upstream `dashcore` crate; this should be verified. 
+ +**Impact**: An attacker with access to the process memory (via core dump, swap file, memory forensics, or a separate vulnerability allowing memory reads) could extract the extended private key and derive all wallet keys, gaining full control over the user's funds. + +**Remediation**: +```rust +let mut xprv_str = xprv.to_string(); +// ... use xprv_str ... +xprv_str.zeroize(); +``` +Also consider wrapping `xprv` in a `Zeroizing` if the type supports it, or manually zeroing the struct's memory after use. + +--- + +## Medium Risk Findings + +### M-1: Path Traversal via `devnet_name` in SPV Data Directory + +**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:1287-1304` +**Type**: CWE-22 (Path Traversal) + +**Description**: The `build_spv_data_dir()` function uses `config.devnet_name` directly as a path component without sanitization: + +```rust +Network::Devnet => config + .devnet_name + .clone() + .unwrap_or_else(|| "devnet".to_string()), +``` + +The `devnet_name` field originates from the `.env` configuration file (parsed via the `envy` crate). If `devnet_name` contains path traversal characters (e.g., `../../etc` or absolute paths), the resulting data directory could point outside the intended SPV data directory. + +**Impact**: A malicious or corrupted `.env` file could cause SPV data to be written to or read from an arbitrary directory on the filesystem. Since `.env` is a local file under the user's control, exploitation requires either: +1. A supply-chain attack replacing the `.env` file +2. A separate vulnerability allowing file writes to the config directory +3. Social engineering to trick the user into using a crafted config + +The risk is **medium** because `.env` is locally controlled, but the lack of validation is a defense-in-depth failure. 
+ +**Remediation**: Validate `devnet_name` to ensure it contains only alphanumeric characters, hyphens, and underscores: +```rust +Network::Devnet => { + let name = config.devnet_name.clone().unwrap_or_else(|| "devnet".to_string()); + if !name.chars().all(|c| c.is_alphanumeric() || c == '-' || c == '_') { + return Err(format!("Invalid devnet_name: contains disallowed characters: {}", name)); + } + name +} +``` + +### M-2: Unbounded Channel for SPV Client Commands + +**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:866` +**Type**: CWE-400 (Uncontrolled Resource Consumption) + +**Description**: The `DashSpvClientInterface` command channel uses `tokio::sync::mpsc::unbounded_channel()`. The code comment states "Memory usage is bounded in practice by SPV command processing speed," but this is an assumption, not a guarantee. + +```rust +let (command_tx, command_receiver) = tokio::sync::mpsc::unbounded_channel(); +``` + +If the consumer (`monitor_network`) stalls or slows down (e.g., due to network latency, disk I/O pressure, or a malicious peer sending data that triggers slow processing), commands can accumulate without bound. + +**Impact**: Potential memory exhaustion denial-of-service. The impact is limited because the `DashSpvClientInterface` is only used for quorum lookups (not high-frequency operations), and the channel is internal (not exposed to external input). However, in theory a slow-processing scenario could cause unbounded growth. + +**Remediation**: This is an SDK API constraint (the `DashSpvClientInterface` requires an unbounded channel). The existing comment documents this well. Consider monitoring the channel's length or adding a periodic log warning if the queue grows beyond a threshold. If the SDK API can be modified upstream, switch to a bounded channel. 
+ +### M-3: `block_in_place` May Block Main Runtime + +**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:639-664` +**Type**: CWE-400 (Resource Exhaustion), Performance Degradation + +**Description**: The `get_quorum_public_key()` method uses `tokio::task::block_in_place()` with `block_on()` inside it. After the refactoring, all SPV operations now share the main tokio runtime. If a quorum lookup takes a long time (network timeout, unresponsive peer), this blocks a worker thread in the main runtime. + +```rust +tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + interface + .get_quorum_by_height(core_chain_locked_height, llmq_type, qh) + .await + }) +}) +``` + +With 12 worker threads configured for the main runtime, a few concurrent stalled quorum lookups could starve the UI and other async tasks. + +**Impact**: Potential UI freeze or degraded responsiveness if quorum lookups stall. This is not a security vulnerability per se, but could contribute to denial-of-service conditions in edge cases. + +**Remediation**: Add a timeout to the quorum lookup: +```rust +tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + tokio::time::timeout( + std::time::Duration::from_secs(10), + interface.get_quorum_by_height(core_chain_locked_height, llmq_type, qh) + ) + .await + .map_err(|_| "Quorum lookup timed out".to_string())? + // ... + }) +}) +``` + +--- + +## Low Risk / Informational Findings + +### L-1: DNS Resolution of User-Provided Host Without Validation + +**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:1270-1284` +**Type**: CWE-918 (Server-Side Request Forgery - variant) + +**Description**: `primary_peer_socket()` resolves `config.core_host` via `ToSocketAddrs` without any validation of the hostname. The hostname comes from the `.env` configuration file. 
+ +```rust +let host = config.core_host.as_str(); +let addr = format!("{}:{}", host, port); +addr.to_socket_addrs().ok()?.next() +``` + +Since this is a desktop application where the user controls the configuration, this is low risk. However, there is no validation that the host is a reasonable value (e.g., not a local service like `localhost:6379` that could be used for SSRF-like attacks if the SPV protocol messages could be crafted to interact with non-SPV services). + +**Impact**: Minimal. The SPV protocol handshake would fail against non-SPV services. The user controls the configuration file. + +**Remediation**: Consider validating that the port is one of the expected Dash P2P ports, or at minimum log a warning when connecting to unexpected hosts/ports. + +### L-2: Error Information Disclosure in Error Messages + +**Location**: Various locations throughout `src/spv/manager.rs` +**Type**: CWE-209 (Information Exposure Through Error Messages) + +**Description**: Error messages include internal details like lock names, file paths, and internal error strings. Since errors are displayed to the end user via the UI (per the project's error handling convention of `Result`), this could expose implementation details. + +Examples: +- `"SPV stop_token lock poisoned"` (line 382) +- `"Failed to create SPV data dir: {e}"` which includes the full path (line 289) +- `"client_interface lock poisoned: {e}"` (line 623) + +**Impact**: Low. This is a desktop application, so information disclosure is to the local user only. However, if error messages are ever sent to a remote telemetry service, this should be reconsidered. + +**Remediation**: Consider using generic user-facing messages while logging detailed errors via `tracing`. 
+### L-3: Wallet Loading Wait Loop Uses Fixed-Interval Polling
+
+**Location**: `/home/ubuntu/git/dash-evo-tool/src/spv/manager.rs:784-815`
+**Type**: CWE-1050 (Excessive Platform Resource Consumption within a Loop) — minor
+ +### rusqlite 0.38.0 + +**Vulnerabilities Found**: 0 relevant to v0.38 +**Sources Checked**: CVE Details, NVD, RustSec + +Historical vulnerabilities (use-after-free in `update_hook`, `rollback_hook`, etc.) affect versions before 0.26.2 and are not applicable to v0.38. + +### tokio 1.46.1 + +**Vulnerabilities Found**: 0 relevant +**Sources Checked**: CVE Details, RustSec, web search + +CVE-2025-62518 (TARmageddon) affects `tokio-tar` / `async-tar`, not the `tokio` runtime itself. No known vulnerabilities in tokio 1.46.1. + +### zeroize 1.8.1 + +**Vulnerabilities Found**: 0 relevant +**Sources Checked**: RustSec, web search + +No known CVEs for the zeroize crate. + +### aes-gcm 0.10.3 / argon2 0.5.3 + +**Vulnerabilities Found**: 0 relevant +**Sources Checked**: RustSec, web search + +RUSTSEC-2025-0009 affects `ring`'s AES-GCM implementation (panic on 64GB+ data), not the `aes-gcm` crate. Not applicable. + +### Similar Solution Research + +**Bitcoin SPV light clients** have documented vulnerabilities: +- CVE-2017-12842: Fake SPV proof creation (Bitcoin Core < 0.14). The Dash SPV implementation uses compact block filters (BIP-157/BIP-158) rather than Bloom filters, which mitigates this class of attack. +- Privacy leakage via Bloom filters: Not applicable because the Dash SPV client uses compact block filters (Neutrino-style), which provide better privacy. +- Peer stalling attacks: The `dash-spv` library handles peer management internally via `PeerNetworkManager`. The code properly handles cancellation tokens for clean shutdown, reducing stalling risk. + +--- + +## Positive Security Observations + +1. **Proper seed zeroization**: `seed_bytes` is correctly zeroized on both success and error paths in `load_wallet_from_seed()`. +2. **No sensitive data in logs**: A search for tracing calls containing "seed", "priv", "key", "secret", or "password" returned no matches in the SPV module. +3. 
**Cancellation-aware shutdown**: The SPV loop properly responds to both local `stop_token` and global `global_cancel` cancellation tokens. +4. **Lock poisoning handled gracefully**: All lock operations use helper methods that return `SpvResult` errors instead of panicking on poisoned locks. +5. **Bounded channels for most operations**: The request channel (32), reconcile channel (64), and finality channel (64) all use bounded mpsc channels. +6. **Clean resource cleanup**: The `run_spv_loop` method cleans up storage, interface, network manager, and request channels on exit regardless of the exit reason. +7. **Broadcast lag recovery**: All broadcast channel receivers handle lagged messages gracefully by triggering reconciliation. + +--- + +## Recommendations Summary + +| ID | Severity | Finding | Action | +|----|----------|---------|--------| +| H-1 | High | xprv_str not zeroized | Zeroize the string after `import_wallet_from_extended_priv_key` | +| M-1 | Medium | Path traversal via devnet_name | Validate devnet_name characters | +| M-2 | Medium | Unbounded command channel | Document SDK constraint; consider monitoring | +| M-3 | Medium | block_in_place may stall runtime | Add timeout to quorum lookup | +| L-1 | Low | DNS resolution without validation | Consider host/port validation | +| L-2 | Low | Internal details in error messages | Use generic user-facing messages | +| L-3 | Low | Wallet wait loop polling | Acceptable as-is (30s timeout) | +| L-5 | Info | Unmaintained transitive deps | Track upstream updates | + +--- + +## Conclusion + +The SPV module demonstrates generally good security practices: proper cancellation handling, graceful lock poisoning recovery, bounded channels for most operations, and no logging of sensitive material. The most significant finding is **H-1** (extended private key string not zeroized), which should be addressed before release. 
The medium-severity findings (M-1 through M-3) represent defense-in-depth improvements that would strengthen the module's resilience. The refactoring from a separate OS thread to the main tokio runtime does not introduce new security vulnerabilities but does create a tighter coupling that makes M-3 (runtime blocking) more relevant than before. diff --git a/src/backend_task/core/mod.rs b/src/backend_task/core/mod.rs index 398d0a719..797af5348 100644 --- a/src/backend_task/core/mod.rs +++ b/src/backend_task/core/mod.rs @@ -12,6 +12,7 @@ use crate::context::AppContext; use crate::model::wallet::Wallet; use crate::model::wallet::single_key::SingleKeyWallet; use crate::spv::CoreBackendMode; +use dash_sdk::dash_spv::sync::ProgressPercentage; use dash_sdk::dashcore_rpc::RpcApi; use dash_sdk::dashcore_rpc::{Auth, Client}; use dash_sdk::dpp::dashcore::secp256k1::{Message, Secp256k1}; diff --git a/src/backend_task/core/start_dash_qt.rs b/src/backend_task/core/start_dash_qt.rs index 67602f477..25ccf9dae 100644 --- a/src/backend_task/core/start_dash_qt.rs +++ b/src/backend_task/core/start_dash_qt.rs @@ -67,7 +67,7 @@ impl AppContext { // Spawn a task to wait for the Dash-Qt process to exit let cancel = self.subtasks.cancellation_token.clone(); let db = Arc::clone(&self.db); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("dash_qt_watcher", async move { // Wait for the process to exit or current task to be cancelled tokio::select! { diff --git a/src/context/connection_status.rs b/src/context/connection_status.rs index 836559b75..9672804cf 100644 --- a/src/context/connection_status.rs +++ b/src/context/connection_status.rs @@ -111,6 +111,13 @@ impl ConnectionStatus { self.disable_zmq.store(disable, Ordering::Relaxed); } + /// Reset the throttle timer so the next `trigger_refresh()` fires immediately. 
+ pub fn reset_timer(&self) { + if let Ok(mut last) = self.last_update.lock() { + *last = Instant::now() - REFRESH_CONNECTED; + } + } + pub fn dapi_total_endpoints(&self) -> u16 { self.dapi_total_endpoints.load(Ordering::Relaxed) } @@ -272,7 +279,11 @@ impl ConnectionStatus { Err(poisoned) => poisoned.into_inner(), }; let now = Instant::now(); - let timeout = if self.overall_connected() { + let timeout = if self.spv_status() == SpvStatus::Stopping { + // Poll frequently during SPV shutdown so the UI updates + // within ~200ms of the Stopping → Stopped transition. + Duration::from_millis(200) + } else if self.overall_connected() { REFRESH_CONNECTED } else { REFRESH_DISCONNECTED @@ -299,6 +310,7 @@ impl ConnectionStatus { CoreBackendMode::Spv => { // SPV status is updated elsewhere let spv_status = app_context.spv_manager().status().status; + tracing::trace!("ConnectionStatus: polled SPV status = {:?}", spv_status); self.set_spv_status(spv_status); } CoreBackendMode::Rpc => { diff --git a/src/context/wallet_lifecycle.rs b/src/context/wallet_lifecycle.rs index ceef9c658..8d1117bdd 100644 --- a/src/context/wallet_lifecycle.rs +++ b/src/context/wallet_lifecycle.rs @@ -71,6 +71,12 @@ impl AppContext { self.spv_setup_reconcile_listener(); self.spv_setup_finality_listener(); self.spv_manager.start(expected_wallets)?; + // Immediately reflect the new SPV status in ConnectionStatus so the + // UI sees the change on the next frame instead of waiting for the + // next throttled trigger_refresh() cycle (2-10 seconds). 
+ self.connection_status + .set_spv_status(self.spv_manager.status().status); + self.connection_status.refresh_overall(); Ok(()) } @@ -119,7 +125,7 @@ impl AppContext { fn queue_spv_wallet_load(self: &Arc, seed_hash: WalletSeedHash, seed_bytes: [u8; 64]) { let spv = Arc::clone(&self.spv_manager); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("spv_wallet_load", async move { if let Err(error) = spv.load_wallet_from_seed(seed_hash, seed_bytes).await { tracing::error!(seed = %hex::encode(seed_hash), %error, "Failed to load SPV wallet from seed"); } @@ -128,7 +134,7 @@ impl AppContext { fn queue_spv_wallet_unload(self: &Arc, seed_hash: WalletSeedHash) { let spv = Arc::clone(&self.spv_manager); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("spv_wallet_unload", async move { if let Err(error) = spv.unload_wallet(seed_hash).await { tracing::error!(seed = %hex::encode(seed_hash), %error, "Failed to unload SPV wallet"); } @@ -144,17 +150,18 @@ impl AppContext { ) { let ctx = Arc::clone(self); let wallet_clone = Arc::clone(wallet); - self.subtasks.spawn_sync(async move { - if let Err(error) = ctx - .discover_identities_from_wallet(&wallet_clone, max_identity_index) - .await - { - tracing::warn!( - %error, - "Failed to discover identities from wallet" - ); - } - }); + self.subtasks + .spawn_sync("wallet_identity_discovery", async move { + if let Err(error) = ctx + .discover_identities_from_wallet(&wallet_clone, max_identity_index) + .await + { + tracing::warn!( + %error, + "Failed to discover identities from wallet" + ); + } + }); } pub fn bootstrap_loaded_wallets(self: &Arc) { @@ -174,16 +181,17 @@ impl AppContext { if self.core_backend_mode() == CoreBackendMode::Rpc { for wallet in wallets { let ctx = Arc::clone(self); - self.subtasks.spawn_sync(async move { - if let Err(e) = - tokio::task::spawn_blocking(move || ctx.refresh_wallet_info(wallet)) - .await - .map_err(|e| format!("Task join error: {}", e)) - .and_then(|r| r.map(|_| ())) 
- { - tracing::warn!("Failed to auto-refresh wallet UTXOs on startup: {}", e); - } - }); + self.subtasks + .spawn_sync("refresh_wallet_utxos", async move { + if let Err(e) = + tokio::task::spawn_blocking(move || ctx.refresh_wallet_info(wallet)) + .await + .map_err(|e| format!("Task join error: {}", e)) + .and_then(|r| r.map(|_| ())) + { + tracing::warn!("Failed to auto-refresh wallet UTXOs on startup: {}", e); + } + }); } let single_key_wallets: Vec<_> = { @@ -192,20 +200,21 @@ impl AppContext { }; for wallet in single_key_wallets { let ctx = Arc::clone(self); - self.subtasks.spawn_sync(async move { - if let Err(e) = tokio::task::spawn_blocking(move || { - ctx.refresh_single_key_wallet_info(wallet) - }) - .await - .map_err(|e| format!("Task join error: {}", e)) - .and_then(|r| r) - { - tracing::warn!( - "Failed to auto-refresh single key wallet UTXOs on startup: {}", - e - ); - } - }); + self.subtasks + .spawn_sync("refresh_single_key_wallet_utxos", async move { + if let Err(e) = tokio::task::spawn_blocking(move || { + ctx.refresh_single_key_wallet_info(wallet) + }) + .await + .map_err(|e| format!("Task join error: {}", e)) + .and_then(|r| r) + { + tracing::warn!( + "Failed to auto-refresh single key wallet UTXOs on startup: {}", + e + ); + } + }); } } } @@ -405,14 +414,29 @@ impl AppContext { pub fn spv_setup_finality_listener(self: &Arc) { let rx = self.spv_manager.register_finality_channel(); let ctx = Arc::clone(self); - self.subtasks.spawn_sync(async move { - tokio::pin!(rx); - while let Some(event) = rx.recv().await { - if let Err(e) = ctx.handle_spv_finality_event(event).await { - tracing::debug!("SPV finality event error: {}", e); + let cancel = self.subtasks.cancellation_token.clone(); + self.subtasks + .spawn_sync("spv_finality_listener", async move { + tokio::pin!(rx); + loop { + tokio::select! 
{ + _ = cancel.cancelled() => break, + maybe = rx.recv() => { + let Some(event) = maybe else { break; }; + // Wrap handler in select so cancellation can interrupt + // even when blocked on locks held by the SPV sync thread. + tokio::select! { + _ = cancel.cancelled() => break, + result = ctx.handle_spv_finality_event(event) => { + if let Err(e) = result { + tracing::debug!("SPV finality event error: {}", e); + } + } + } + } + } } - } - }); + }); } async fn handle_spv_finality_event(&self, event: AssetLockFinalityEvent) -> Result<(), String> { @@ -488,20 +512,37 @@ impl AppContext { use tokio::time::{Duration, Instant, sleep}; let rx = self.spv_manager.register_reconcile_channel(); let ctx = Arc::clone(self); - self.subtasks.spawn_sync(async move { + let cancel = self.subtasks.cancellation_token.clone(); + self.subtasks.spawn_sync("spv_reconcile_listener", async move { tokio::pin!(rx); let mut last = Instant::now(); loop { tokio::select! { + _ = cancel.cancelled() => break, maybe = rx.recv() => { if maybe.is_none() { break; } // simple debounce window if last.elapsed() > Duration::from_millis(300) { - if let Err(e) = ctx.reconcile_spv_wallets().await { tracing::debug!("SPV reconcile error: {}", e); } + // Wrap in select so cancellation can interrupt when + // blocked on locks held by the SPV sync thread. + tokio::select! { + _ = cancel.cancelled() => break, + result = ctx.reconcile_spv_wallets() => { + if let Err(e) = result { tracing::debug!("SPV reconcile error: {}", e); } + } + } last = Instant::now(); } else { - sleep(Duration::from_millis(300)).await; - if let Err(e) = ctx.reconcile_spv_wallets().await { tracing::debug!("SPV reconcile error: {}", e); } + tokio::select! { + _ = cancel.cancelled() => break, + _ = sleep(Duration::from_millis(300)) => {} + } + tokio::select! 
{ + _ = cancel.cancelled() => break, + result = ctx.reconcile_spv_wallets() => { + if let Err(e) = result { tracing::debug!("SPV reconcile error: {}", e); } + } + } last = Instant::now(); } } @@ -727,5 +768,14 @@ impl AppContext { pub fn stop_spv(&self) { self.spv_manager.stop(); + // Immediately reflect the new SPV status in ConnectionStatus so the + // UI sees the change on the next frame instead of waiting for the + // next throttled trigger_refresh() cycle (2-10 seconds). + self.connection_status + .set_spv_status(self.spv_manager.status().status); + self.connection_status.refresh_overall(); + // Reset the throttle timer so trigger_refresh() starts polling + // at 200ms intervals and picks up the Stopped transition quickly. + self.connection_status.reset_timer(); } } diff --git a/src/spv/manager.rs b/src/spv/manager.rs index 09b153881..a2e9ddcd3 100644 --- a/src/spv/manager.rs +++ b/src/spv/manager.rs @@ -137,9 +137,9 @@ pub(crate) enum AssetLockFinalityEvent { } /// Manages SPV client lifecycle and exposes status updates. -/// Uses dash-spv's built-in state management while maintaining a dedicated runtime for performance. +/// Uses dash-spv's built-in state management, running as a spawned task on the main tokio runtime. /// -/// The client itself is owned by the background runtime thread and accessed through +/// The client itself is owned by the background task and accessed through /// its internally-shared components (wallet, storage, etc.) rather than through additional locking. pub struct SpvManager { network: Network, @@ -394,7 +394,10 @@ impl SpvManager { self.write_progress_updated_at(None) .map_err(|e| e.to_string())?; - let stop_token = CancellationToken::new(); + // Derive stop_token as a child of the global cancellation token so that + // global shutdown automatically cancels SPV without requiring an explicit + // SpvManager::stop() call. SpvManager::stop() can still cancel it early. 
+ let stop_token = self.subtasks.cancellation_token.child_token(); { let mut guard = self .stop_token @@ -404,50 +407,42 @@ impl SpvManager { } let manager = Arc::clone(self); - let global_cancel = self.subtasks.cancellation_token.clone(); - - // Spawn a dedicated OS thread with a multi-thread Tokio runtime for SPV operations - // This ensures SPV sync doesn't compete with UI thread resources - std::thread::Builder::new() - .name("spv".to_string()) - .spawn(move || { - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(4) - .enable_all() - .thread_name("spv-rt") - .build() - .expect("Failed to create SPV runtime"); - - rt.block_on(async move { - let manager_for_loop = Arc::clone(&manager); - if let Err(err) = manager_for_loop.run_spv_loop(stop_token, global_cancel, expected_wallet_count).await { - tracing::error!(error = %err, network = ?manager.network, "SPV runtime failed"); - if let Err(e) = manager.write_last_error(Some(err.clone())) { - tracing::error!("Failed to write SPV error: {}", e); - } - if let Err(e) = manager.write_status(SpvStatus::Error) { - tracing::error!("Failed to write SPV status: {}", e); - } - } - // Clean up on exit - if let Ok(mut guard) = manager.stop_token.lock() { - *guard = None; - } - }); - }) - .map_err(|e| format!("Failed to spawn SPV thread: {e}"))?; + // Spawn SPV loop as a task on the main tokio runtime + self.subtasks.spawn_sync("spv_main_loop", async move { + let manager_for_loop = Arc::clone(&manager); + if let Err(err) = manager_for_loop + .run_spv_loop(stop_token, expected_wallet_count) + .await + { + tracing::error!(error = %err, network = ?manager.network, "SPV runtime failed"); + if let Err(e) = manager.write_last_error(Some(err.clone())) { + tracing::error!("Failed to write SPV error: {}", e); + } + if let Err(e) = manager.write_status(SpvStatus::Error) { + tracing::error!("Failed to write SPV status: {}", e); + } + } + + // Clean up on exit + if let Ok(mut guard) = manager.stop_token.lock() { + *guard 
= None; + } + }); Ok(()) } pub fn stop(&self) { + tracing::info!("SpvManager::stop() called"); let maybe_token = self.stop_token.lock().ok().and_then(|g| g.clone()); if let Some(token) = maybe_token { + tracing::info!("SpvManager::stop(): cancelling stop_token, setting Stopping"); let _ = self.write_status(SpvStatus::Stopping); token.cancel(); } else { + tracing::debug!("SpvManager::stop(): no stop_token, setting Stopped immediately"); let _ = self.write_status(SpvStatus::Stopped); } } @@ -706,14 +701,21 @@ impl SpvManager { format!("ExtendedPrivKey::new_master failed: {e}") })?; seed_bytes.zeroize(); - let xprv_str = xprv.to_string(); + let mut xprv_str = xprv.to_string(); let account_options = Self::default_account_creation_options(); let wallet_id = match wm.import_wallet_from_extended_priv_key(&xprv_str, account_options) { - Ok(id) => id, - Err(WalletError::WalletExists(id)) => id, + Ok(id) => { + xprv_str.zeroize(); + id + } + Err(WalletError::WalletExists(id)) => { + xprv_str.zeroize(); + id + } Err(err) => { + xprv_str.zeroize(); return Err(format!( "import_wallet_from_extended_priv_key failed: {err}" )); @@ -783,7 +785,6 @@ impl SpvManager { async fn run_spv_loop( self: Arc, stop_token: CancellationToken, - global_cancel: CancellationToken, expected_wallet_count: usize, ) -> Result<(), String> { // Wait for all expected wallets to be fully loaded into the WalletManager @@ -817,7 +818,7 @@ impl SpvManager { ); break; } - if stop_token.is_cancelled() || global_cancel.is_cancelled() { + if stop_token.is_cancelled() { return Ok(()); } tokio::time::sleep(std::time::Duration::from_millis(50)).await; @@ -890,7 +891,7 @@ impl SpvManager { // Run sync and monitor with the client owned in this scope let result = self .clone() - .run_sync_and_monitor(client, command_receiver, stop_token, global_cancel) + .run_sync_and_monitor(client, command_receiver, stop_token) .await; // Clear the interface and network manager since the client is done @@ -924,14 +925,12 @@ impl 
SpvManager { mut client: SpvClient, command_receiver: mpsc::UnboundedReceiver, stop_token: CancellationToken, - global_cancel: CancellationToken, ) -> Result<(), String> { // Monitor network continuously - this handles initial sync and ongoing monitoring // Requests are handled through the DashSpvClientInterface command channel enum Outcome { MonitorCompleted(Result<(), dash_sdk::dash_spv::SpvError>), - StopRequested, - GlobalCancelled, + Cancelled, } let outcome = { @@ -939,21 +938,34 @@ impl SpvManager { let monitor_future = client.monitor_network(command_receiver, monitor_cancel.clone()); tokio::pin!(monitor_future); + // stop_token is a child of global_cancel, so it fires on either + // explicit SpvManager::stop() or application-wide shutdown. tokio::select! { result = &mut monitor_future => Outcome::MonitorCompleted(result), _ = stop_token.cancelled() => { monitor_cancel.cancel(); - Outcome::StopRequested - }, - _ = global_cancel.cancelled() => { - monitor_cancel.cancel(); - Outcome::GlobalCancelled + Outcome::Cancelled }, } }; // monitor_future is dropped here, releasing the mutable borrow + tracing::info!( + "run_sync_and_monitor: outcome = {}", + match &outcome { + Outcome::MonitorCompleted(Ok(())) => "MonitorCompleted(Ok)", + Outcome::MonitorCompleted(Err(_)) => "MonitorCompleted(Err)", + Outcome::Cancelled => "Cancelled", + } + ); + // Stop the client after monitoring completes or is cancelled + tracing::info!("run_sync_and_monitor: calling client.stop()..."); + let stop_start = std::time::Instant::now(); let _ = client.stop().await; + tracing::info!( + "run_sync_and_monitor: client.stop() took {:?}", + stop_start.elapsed() + ); match outcome { Outcome::MonitorCompleted(Ok(())) => { @@ -966,7 +978,7 @@ impl SpvManager { let _ = self.write_status(SpvStatus::Error); Err(message) } - Outcome::StopRequested | Outcome::GlobalCancelled => { + Outcome::Cancelled => { let _ = self.write_status(SpvStatus::Stopped); Ok(()) } @@ -980,7 +992,7 @@ impl SpvManager { ) 
{ tracing::info!("SPV request handler started"); let network_manager = Arc::clone(&self.network_manager); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("spv_request_handler", async move { loop { tokio::select! { _ = cancel.cancelled() => { @@ -1041,7 +1053,7 @@ impl SpvManager { let progress_updated_at = Arc::clone(&self.progress_updated_at); let cancel = self.subtasks.cancellation_token.clone(); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("spv_progress_watcher", async move { loop { tokio::select! { _ = cancel.cancelled() => break, @@ -1081,7 +1093,7 @@ impl SpvManager { let status = Arc::clone(&self.status); let cancel = self.subtasks.cancellation_token.clone(); - self.subtasks.spawn_sync(async move { + self.subtasks.spawn_sync("spv_sync_event_handler", async move { loop { tokio::select! { _ = cancel.cancelled() => break, @@ -1152,32 +1164,33 @@ impl SpvManager { let reconcile_tx = self.reconcile_tx.lock().ok().and_then(|g| g.clone()); let cancel = self.subtasks.cancellation_token.clone(); - self.subtasks.spawn_sync(async move { - loop { - tokio::select! { - _ = cancel.cancelled() => break, - result = wallet_rx.recv() => { - match result { - Ok(_event) => { - // All wallet events trigger reconcile - if let Some(ref tx) = reconcile_tx { - let _ = tx.try_send(()); + self.subtasks + .spawn_sync("spv_wallet_event_handler", async move { + loop { + tokio::select! 
{ + _ = cancel.cancelled() => break, + result = wallet_rx.recv() => { + match result { + Ok(_event) => { + // All wallet events trigger reconcile + if let Some(ref tx) = reconcile_tx { + let _ = tx.try_send(()); + } } - } - Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { - tracing::warn!("Wallet event handler lagged by {} events", n); - // Still trigger reconcile to catch up - if let Some(ref tx) = reconcile_tx { - let _ = tx.try_send(()); + Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { + tracing::warn!("Wallet event handler lagged by {} events", n); + // Still trigger reconcile to catch up + if let Some(ref tx) = reconcile_tx { + let _ = tx.try_send(()); + } } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, } - Err(tokio::sync::broadcast::error::RecvError::Closed) => break, } } } - } - tracing::info!("SPV wallet event handler exiting"); - }); + tracing::info!("SPV wallet event handler exiting"); + }); } fn spawn_network_event_handler( @@ -1187,30 +1200,31 @@ impl SpvManager { let connected_peers = Arc::clone(&self.connected_peers); let cancel = self.subtasks.cancellation_token.clone(); - self.subtasks.spawn_sync(async move { - loop { - tokio::select! { - _ = cancel.cancelled() => break, - result = net_rx.recv() => { - match result { - Ok(NetworkEvent::PeersUpdated { connected_count, .. }) => { - if let Ok(mut guard) = connected_peers.write() { - *guard = connected_count; + self.subtasks + .spawn_sync("spv_network_event_handler", async move { + loop { + tokio::select! { + _ = cancel.cancelled() => break, + result = net_rx.recv() => { + match result { + Ok(NetworkEvent::PeersUpdated { connected_count, .. 
}) => { + if let Ok(mut guard) = connected_peers.write() { + *guard = connected_count; + } } + Ok(_) => { + // PeerConnected / PeerDisconnected — PeersUpdated follows + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { + tracing::warn!("Network event handler lagged by {} events", n); + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, } - Ok(_) => { - // PeerConnected / PeerDisconnected — PeersUpdated follows - } - Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { - tracing::warn!("Network event handler lagged by {} events", n); - } - Err(tokio::sync::broadcast::error::RecvError::Closed) => break, } } } - } - tracing::info!("SPV network event handler exiting"); - }); + tracing::info!("SPV network event handler exiting"); + }); } async fn build_client( @@ -1300,10 +1314,28 @@ fn build_spv_data_dir(network: Network, config: &NetworkConfig) -> Result "mainnet".to_string(), Network::Testnet => "testnet".to_string(), - Network::Devnet => config - .devnet_name - .clone() - .unwrap_or_else(|| "devnet".to_string()), + Network::Devnet => { + let name = config + .devnet_name + .clone() + .unwrap_or_else(|| "devnet".to_string()); + // Sanitize to prevent path traversal (e.g. "../../etc") + let sanitized: String = name + .chars() + .map(|c| { + if c.is_alphanumeric() || c == '-' || c == '_' { + c + } else { + '_' + } + }) + .collect(); + if sanitized.is_empty() { + "devnet".to_string() + } else { + sanitized + } + } Network::Regtest => "regtest".to_string(), other => format!("{other:?}"), }; diff --git a/src/spv/mod.rs b/src/spv/mod.rs index 022d2c569..5db1179c7 100644 --- a/src/spv/mod.rs +++ b/src/spv/mod.rs @@ -1,5 +1,7 @@ mod error; pub(crate) mod manager; +#[cfg(test)] +mod tests; pub use error::{SpvError, SpvResult}; pub(crate) use manager::AssetLockFinalityEvent; diff --git a/src/spv/tests.rs b/src/spv/tests.rs new file mode 100644 index 000000000..776a756e0 --- /dev/null +++ b/src/spv/tests.rs @@ -0,0 +1,622 @@ +//! 
Integration tests for SpvManager lifecycle, concurrency, and state transitions. + +use crate::config::NetworkConfig; +use crate::spv::SpvStatus; +use crate::spv::manager::SpvManager; +use crate::utils::tasks::TaskManager; +use dash_sdk::dpp::dashcore::Network; +use std::sync::{Arc, RwLock}; +use tokio::time::{Duration, timeout}; + +/// Deadlock detection timeout: if any operation takes longer than this, +/// the test fails (likely a deadlock). +const DEADLOCK_TIMEOUT: Duration = Duration::from_secs(10); + +/// Create a minimal testnet NetworkConfig for testing. +fn test_network_config() -> NetworkConfig { + NetworkConfig { + dapi_addresses: "https://127.0.0.1:1443".to_string(), + core_host: "127.0.0.1".to_string(), + core_rpc_port: 19998, + core_rpc_user: "dashrpc".to_string(), + core_rpc_password: "password".to_string(), + insight_api_url: "https://testnet-insight.dash.org/insight-api".to_string(), + core_zmq_endpoint: Some("tcp://127.0.0.1:23709".to_string()), + devnet_name: None, + wallet_private_key: None, + show_in_ui: true, + } +} + +/// Create an SpvManager for testing. Uses testnet config and a fresh TaskManager. +fn create_test_manager() -> (Arc, Arc) { + let config = Arc::new(RwLock::new(test_network_config())); + let task_manager = Arc::new(TaskManager::new()); + let manager = SpvManager::new(Network::Testnet, config, task_manager.clone()) + .expect("SpvManager::new should succeed"); + (manager, task_manager) +} + +// ── Construction and initial state ─────────────────────────────── + +/// Given a freshly constructed SpvManager, +/// When reading the status snapshot, +/// Then status is Idle, no error, no start time, no progress, and 0 peers. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_new_manager_has_idle_status() { + let (manager, _tm) = create_test_manager(); + let snapshot = manager.status(); + assert_eq!( + snapshot.status, + SpvStatus::Idle, + "New manager should be Idle" + ); + assert!( + snapshot.last_error.is_none(), + "New manager should have no error" + ); + assert!( + snapshot.started_at.is_none(), + "New manager should have no started_at" + ); + assert!( + snapshot.sync_progress.is_none(), + "New manager should have no sync progress" + ); + assert_eq!( + snapshot.connected_peers, 0, + "New manager should have 0 connected peers" + ); +} + +/// Given a freshly constructed SpvManager, +/// When taking multiple sync and async snapshots in sequence, +/// Then all snapshots are consistent (Idle, no error, 0 peers). +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_status_snapshot_consistency() { + let (manager, _tm) = create_test_manager(); + + for _ in 0..10 { + let sync_snapshot = manager.status(); + let async_snapshot = manager.status_async().await; + + assert_eq!(sync_snapshot.status, SpvStatus::Idle); + assert_eq!(async_snapshot.status, SpvStatus::Idle); + assert!(sync_snapshot.last_error.is_none()); + assert!(async_snapshot.last_error.is_none()); + assert_eq!(sync_snapshot.connected_peers, 0); + assert_eq!(async_snapshot.connected_peers, 0); + } +} + +// ── Stop when idle ─────────────────────────────────────────────── + +/// Given an idle SpvManager that has never been started, +/// When calling stop(), +/// Then it completes without panic or deadlock and sets status to Stopped. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_stop_when_idle_does_not_panic() { + let (manager, _tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { + manager.stop(); + }) + .await; + assert!( + result.is_ok(), + "stop() should complete within timeout (no deadlock)" + ); + + let snapshot = manager.status(); + assert_eq!( + snapshot.status, + SpvStatus::Stopped, + "stop() on idle manager should set status to Stopped" + ); +} + +/// Given an idle SpvManager, +/// When calling stop() twice in succession, +/// Then both calls complete without panic or deadlock. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_double_stop_does_not_panic() { + let (manager, _tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { + manager.stop(); + manager.stop(); + }) + .await; + assert!( + result.is_ok(), + "Double stop() should complete within timeout" + ); + + let snapshot = manager.status(); + assert_eq!(snapshot.status, SpvStatus::Stopped); +} + +// ── use_local_node flag ────────────────────────────────────────── + +/// Given a freshly constructed SpvManager, +/// When toggling use_local_node on and off, +/// Then the getter reflects each change correctly. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_use_local_node_toggle() { + let (manager, _tm) = create_test_manager(); + + assert!(!manager.use_local_node(), "Default should be false"); + manager.set_use_local_node(true); + assert!(manager.use_local_node(), "Should be true after set"); + manager.set_use_local_node(false); + assert!(!manager.use_local_node(), "Should be false after reset"); +} + +// ── clear_data_dir when idle ───────────────────────────────────── + +/// Given an idle SpvManager, +/// When calling clear_data_dir(), +/// Then it succeeds and the status remains Idle. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_clear_data_dir_when_idle() { + let (manager, _tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { manager.clear_data_dir() }).await; + assert!( + result.is_ok(), + "clear_data_dir() should complete within timeout" + ); + let clear_result = result.unwrap(); + assert!( + clear_result.is_ok(), + "clear_data_dir() should succeed when idle: {:?}", + clear_result.err() + ); + + let snapshot = manager.status(); + assert_eq!(snapshot.status, SpvStatus::Idle); +} + +// ── Concurrent status reads ────────────────────────────────────── + +/// Given an idle SpvManager shared across 20 concurrent tasks, +/// When each task reads the status (sync and async) 100 times, +/// Then all reads complete within the deadlock timeout without panic. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_concurrent_status_reads_no_deadlock() { + let (manager, _tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { + let mut handles = Vec::new(); + for _ in 0..20 { + let mgr = Arc::clone(&manager); + handles.push(tokio::spawn(async move { + for _ in 0..100 { + let _snapshot = mgr.status(); + let _async_snapshot = mgr.status_async().await; + tokio::task::yield_now().await; + } + })); + } + for handle in handles { + handle.await.expect("Task should not panic"); + } + }) + .await; + + assert!( + result.is_ok(), + "Concurrent status reads should complete within timeout (no deadlock)" + ); +} + +// ── Start lifecycle (no network) ───────────────────────────────── + +/// Given an idle SpvManager with no wallets, +/// When calling start(0), +/// Then it returns Ok and the status transitions to Starting (or Syncing/Error +/// if the background task progresses before the assertion). 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_start_sets_starting_status() { + let (manager, tm) = create_test_manager(); + + let start_result = manager.start(0); + assert!( + start_result.is_ok(), + "start() should return Ok: {:?}", + start_result.err() + ); + + let snapshot = manager.status(); + assert!( + snapshot.status == SpvStatus::Starting + || snapshot.status == SpvStatus::Syncing + || snapshot.status == SpvStatus::Error, + "After start(), status should be Starting, Syncing, or Error (if network fails fast), got: {:?}", + snapshot.status + ); + + manager.stop(); + tokio::time::sleep(Duration::from_millis(200)).await; + let _ = tm.shutdown(); +} + +/// Given an already-started SpvManager, +/// When calling start() a second time, +/// Then it returns Ok without spawning a duplicate loop (idempotent). +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_double_start_is_idempotent() { + let (manager, tm) = create_test_manager(); + + let first = manager.start(0); + assert!(first.is_ok(), "First start() should succeed"); + + tokio::time::sleep(Duration::from_millis(100)).await; + + let second = manager.start(0); + assert!(second.is_ok(), "Second start() should succeed (idempotent)"); + + manager.stop(); + tokio::time::sleep(Duration::from_millis(200)).await; + let _ = tm.shutdown(); +} + +// ── Start + Stop lifecycle (clean shutdown) ────────────────────── + +/// Given a started SpvManager, +/// When calling stop() and polling for completion, +/// Then the status reaches Stopped (or Error) within the deadlock timeout. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_start_stop_clean_shutdown() { + let (manager, tm) = create_test_manager(); + + manager.start(0).expect("start() should succeed"); + + tokio::time::sleep(Duration::from_millis(100)).await; + + let result = timeout(DEADLOCK_TIMEOUT, async { + manager.stop(); + for _ in 0..100 { + let snapshot = manager.status(); + if snapshot.status == SpvStatus::Stopped || snapshot.status == SpvStatus::Error { + return snapshot.status; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + manager.status().status + }) + .await; + + assert!( + result.is_ok(), + "Stop should complete within timeout (no deadlock)" + ); + let final_status = result.unwrap(); + assert!( + final_status == SpvStatus::Stopped || final_status == SpvStatus::Error, + "After stop(), status should be Stopped or Error, got: {:?}", + final_status + ); + + let _ = tm.shutdown(); +} + +// ── Rapid start/stop ───────────────────────────────────────────── + +/// Given a SpvManager, +/// When performing 5 rapid start/stop cycles, +/// Then all cycles complete without panic or deadlock. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_rapid_start_stop_no_panic() { + let (manager, tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { + for _ in 0..5 { + let _ = manager.start(0); + tokio::time::sleep(Duration::from_millis(50)).await; + manager.stop(); + tokio::time::sleep(Duration::from_millis(200)).await; + } + }) + .await; + + assert!( + result.is_ok(), + "Rapid start/stop cycles should complete within timeout (no deadlock or panic)" + ); + + let _ = tm.shutdown(); +} + +// ── Concurrent status reads during start/stop ──────────────────── + +/// Given a SpvManager with 10 concurrent reader tasks, +/// When performing a start/stop lifecycle while readers are active, +/// Then all readers complete without panic or deadlock. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_concurrent_reads_during_lifecycle() { + let (manager, tm) = create_test_manager(); + + let result = timeout(DEADLOCK_TIMEOUT, async { + let mut readers = Vec::new(); + for _ in 0..10 { + let mgr = Arc::clone(&manager); + readers.push(tokio::spawn(async move { + for _ in 0..50 { + let snapshot = mgr.status(); + let _ = snapshot.status.is_active(); + let _ = snapshot.last_error; + let _ = snapshot.connected_peers; + tokio::task::yield_now().await; + } + })); + } + + let _ = manager.start(0); + tokio::time::sleep(Duration::from_millis(100)).await; + manager.stop(); + tokio::time::sleep(Duration::from_millis(200)).await; + + for r in readers { + r.await.expect("Reader task should not panic"); + } + }) + .await; + + assert!( + result.is_ok(), + "Concurrent reads during lifecycle should complete without deadlock" + ); + + let _ = tm.shutdown(); +} + +// ── SpvStatus helper methods ───────────────────────────────────── + +/// Given all SpvStatus variants, +/// When calling is_active(), +/// Then Starting, Syncing, Running, and Stopping return true; others return false. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_spv_status_is_active() { + assert!(!SpvStatus::Idle.is_active()); + assert!(SpvStatus::Starting.is_active()); + assert!(SpvStatus::Syncing.is_active()); + assert!(SpvStatus::Running.is_active()); + assert!(SpvStatus::Stopping.is_active()); + assert!(!SpvStatus::Stopped.is_active()); + assert!(!SpvStatus::Error.is_active()); +} + +/// Given u8 values 0 through 6 and out-of-range values, +/// When converting via From, +/// Then each maps to the correct SpvStatus variant (out-of-range defaults to Idle). 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_spv_status_from_u8_roundtrip() { + for val in 0u8..=6 { + let status = SpvStatus::from(val); + let display = format!("{}", status); + assert!( + !display.is_empty(), + "Display should not be empty for value {}", + val + ); + } + assert_eq!(SpvStatus::from(255), SpvStatus::Idle); + assert_eq!(SpvStatus::from(7), SpvStatus::Idle); +} + +// ── Wallet operations on idle manager ──────────────────────────── + +/// Given a freshly constructed SpvManager with no wallets loaded, +/// When calling det_wallets_snapshot(), +/// Then the returned map is empty. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_det_wallets_snapshot_empty() { + let (manager, _tm) = create_test_manager(); + let wallets = manager.det_wallets_snapshot(); + assert!(wallets.is_empty(), "New manager should have no wallets"); +} + +/// Given a freshly constructed SpvManager, +/// When looking up a wallet by an unknown seed hash, +/// Then None is returned. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_wallet_id_for_seed_returns_none() { + let (manager, _tm) = create_test_manager(); + let seed_hash = [0u8; 32]; + assert!( + manager.wallet_id_for_seed(seed_hash).is_none(), + "Unknown seed hash should return None" + ); +} + +// ── Reconcile and finality channels ────────────────────────────── + +/// Given a freshly constructed SpvManager, +/// When registering a reconcile channel and immediately trying to receive, +/// Then the channel is open but empty (no signals yet). 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_register_reconcile_channel() { + let (manager, _tm) = create_test_manager(); + let mut rx = manager.register_reconcile_channel(); + + let result = rx.try_recv(); + assert!(result.is_err(), "Channel should be empty initially"); +} + +/// Given a freshly constructed SpvManager, +/// When registering a finality channel and immediately trying to receive, +/// Then the channel is open but empty (no events yet). +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_register_finality_channel() { + let (manager, _tm) = create_test_manager(); + let mut rx = manager.register_finality_channel(); + + let result = rx.try_recv(); + assert!(result.is_err(), "Channel should be empty initially"); +} + +// ── Broadcast transaction on idle manager ──────────────────────── + +/// Given an idle SpvManager that has not been started, +/// When attempting to broadcast a transaction, +/// Then the call fails with an error indicating SPV is not running. +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_broadcast_transaction_fails_when_not_running() { + let (manager, _tm) = create_test_manager(); + + let tx = dash_sdk::dpp::dashcore::Transaction { + version: 2, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + + let result = manager.broadcast_transaction(&tx).await; + assert!( + result.is_err(), + "Broadcast should fail when SPV is not running" + ); + let err = result.unwrap_err(); + assert!( + err.contains("not running"), + "Error should mention not running, got: {}", + err + ); +} + +// ── CoreBackendMode ────────────────────────────────────────────── + +/// Given CoreBackendMode variants, +/// When converting between u8 and enum via From and as_u8(), +/// Then roundtrip is correct (0 = Rpc, 1 = Spv, unknown defaults to Rpc). 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_core_backend_mode_roundtrip() { + use crate::spv::CoreBackendMode; + + assert_eq!(CoreBackendMode::from(0), CoreBackendMode::Rpc); + assert_eq!(CoreBackendMode::from(1), CoreBackendMode::Spv); + assert_eq!(CoreBackendMode::from(99), CoreBackendMode::Rpc); // default + + assert_eq!(CoreBackendMode::Rpc.as_u8(), 0); + assert_eq!(CoreBackendMode::Spv.as_u8(), 1); +} + +// ── Live testnet sync ──────────────────────────────────────────── + +/// Parse `.env.example` from the project root and extract the TESTNET_ NetworkConfig. +fn load_testnet_config_from_env_example() -> NetworkConfig { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let env_path = std::path::Path::new(manifest_dir).join(".env.example"); + assert!( + env_path.exists(), + ".env.example not found at {}", + env_path.display() + ); + + let contents = std::fs::read_to_string(&env_path).expect("Failed to read .env.example"); + + let mut vars = std::collections::HashMap::new(); + for line in contents.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + if let Some((key, value)) = line.split_once('=') { + vars.insert(key.to_string(), value.to_string()); + } + } + + envy::prefixed("TESTNET_") + .from_iter(vars) + .expect("Failed to parse TESTNET_ config from .env.example") +} + +/// Given an SpvManager configured with real testnet DAPI addresses from `.env.example`, +/// When starting SPV sync with no wallets, letting it sync for 10 seconds, then stopping, +/// Then at least one peer connects, sync progress is reported, and shutdown completes +/// within 15 seconds without deadlock. 
+#[ignore] // Requires network access to Dash testnet peers +#[tokio::test(flavor = "multi_thread", worker_threads = 12)] +async fn test_live_testnet_sync_and_shutdown() { + let testnet_config = load_testnet_config_from_env_example(); + let config = Arc::new(RwLock::new(testnet_config)); + let task_manager = Arc::new(TaskManager::new()); + let manager = SpvManager::new(Network::Testnet, config, task_manager.clone()) + .expect("SpvManager::new should succeed"); + + // Start SPV with no wallets (header-only sync to chain tip) + manager.start(0).expect("start() should succeed"); + + // Wait for peers to connect (up to 30s) + let connect_timeout = Duration::from_secs(30); + let connect_result = timeout(connect_timeout, async { + loop { + let snapshot = manager.status_async().await; + if snapshot.connected_peers > 0 { + return snapshot; + } + if snapshot.status == SpvStatus::Error + && let Some(ref err) = snapshot.last_error + { + eprintln!("SPV reported error during peer discovery: {}", err); + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + }) + .await; + + assert!( + connect_result.is_ok(), + "Should connect to at least one testnet peer within 30s" + ); + let snapshot = connect_result.unwrap(); + eprintln!( + "Connected to {} peer(s), status: {:?}", + snapshot.connected_peers, snapshot.status + ); + + // Let the sync run for 10 seconds to exercise the full pipeline + eprintln!("Letting sync run for 10 seconds..."); + tokio::time::sleep(Duration::from_secs(10)).await; + + // Capture state after syncing + let snapshot = manager.status_async().await; + eprintln!( + "After 10s sync: status={:?}, peers={}, progress={:?}", + snapshot.status, snapshot.connected_peers, snapshot.sync_progress + ); + assert!( + snapshot.sync_progress.is_some(), + "Should have received sync progress after 10s of syncing" + ); + + // Shutdown must complete within 15 seconds -- timeout means deadlock + let shutdown_timeout = Duration::from_secs(15); + let shutdown_result = 
timeout(shutdown_timeout, async { + manager.stop(); + loop { + let snapshot = manager.status_async().await; + if snapshot.status == SpvStatus::Stopped || snapshot.status == SpvStatus::Error { + return snapshot.status; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await; + + assert!( + shutdown_result.is_ok(), + "Shutdown MUST complete within 15s -- timeout indicates a deadlock" + ); + let final_status = shutdown_result.unwrap(); + assert_eq!( + final_status, + SpvStatus::Stopped, + "Final status should be Stopped after clean shutdown, got: {:?}", + final_status + ); + + let _ = task_manager.shutdown(); +} diff --git a/src/ui/network_chooser_screen.rs b/src/ui/network_chooser_screen.rs index e723d496b..961b1cbfe 100644 --- a/src/ui/network_chooser_screen.rs +++ b/src/ui/network_chooser_screen.rs @@ -16,7 +16,7 @@ use crate::ui::components::top_panel::add_top_panel; use crate::ui::theme::{DashColors, Shape, ThemeMode}; use crate::ui::{RootScreenType, ScreenLike}; use crate::utils::path::format_path_for_display; -use dash_sdk::dash_spv::sync::{SyncProgress as SpvSyncProgress, SyncState}; +use dash_sdk::dash_spv::sync::{ProgressPercentage, SyncProgress as SpvSyncProgress, SyncState}; use dash_sdk::dpp::dashcore::Network; use dash_sdk::dpp::identity::TimestampMillis; use eframe::egui::{self, Color32, Context, Frame, Margin, RichText, Ui}; @@ -498,6 +498,7 @@ impl NetworkChooserScreen { ui.horizontal(|ui| { if overall_connected { if current_backend_mode == CoreBackendMode::Spv { + let is_stopping = spv_status == SpvStatus::Stopping; let disconnect_button = egui::Button::new( egui::RichText::new("Disconnect").color(DashColors::WHITE), ) @@ -506,7 +507,10 @@ impl NetworkChooserScreen { .corner_radius(Shape::RADIUS_MD) .min_size(egui::vec2(120.0, 36.0)); - if ui.add(disconnect_button).clicked() { + if ui + .add_enabled(!is_stopping, disconnect_button) + .clicked() + { self.current_app_context().stop_spv(); } diff --git a/src/ui/theme.rs 
b/src/ui/theme.rs index 4b2be45d8..5b82a028b 100644 --- a/src/ui/theme.rs +++ b/src/ui/theme.rs @@ -21,9 +21,7 @@ pub fn detect_system_theme() -> Result { /// Resolve the actual theme to use based on preference pub fn resolve_theme_mode(preference: ThemeMode) -> ThemeMode { match preference { - ThemeMode::System => detect_system_theme() - .inspect_err(|e| tracing::warn!("Failed to detect system theme: {}", e)) - .unwrap_or(ThemeMode::Light), + ThemeMode::System => detect_system_theme().unwrap_or(ThemeMode::Light), other => other, } } diff --git a/src/utils/tasks.rs b/src/utils/tasks.rs index 3c990b127..b44e4aee7 100644 --- a/src/utils/tasks.rs +++ b/src/utils/tasks.rs @@ -1,4 +1,4 @@ -use std::sync::{Arc, atomic::AtomicUsize}; +use std::sync::{Arc, Mutex, atomic::AtomicUsize}; use tokio::time::{Duration, timeout}; use tokio_util::sync::CancellationToken; @@ -8,7 +8,8 @@ pub const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); #[derive(Debug, Clone)] pub struct TaskManager { pub cancellation_token: CancellationToken, // Cancellation token for graceful shutdown - tasks: Arc>>, // Subtasks for graceful shutdown + tasks: Arc>>, // Subtasks for graceful shutdown + active_names: Arc>>, // Names of currently running tasks } /// TaskManager tracks spawned subtasks and allows for graceful shutdown of all tasks. @@ -20,34 +21,24 @@ impl TaskManager { TaskManager { cancellation_token, tasks: subtasks, + active_names: Arc::new(Mutex::new(Vec::new())), } } - // Spawn a new future as a subtask, to beu used in asynchronous context. - // #[inline(always)] - // pub async fn spawn_async(&self, future: F) - // where - // F: std::future::Future + Send + 'static, - // F::Output: Send + 'static, - // { - // spawn_subtask(self.tasks.clone(), future).await - // } - - /// Spawn a new future as a subtask, to be used in synchronous context. + /// Spawn a named future as a subtask, to be used in synchronous context. /// - /// Right now only used to manage dash-qt process. 
- /// - /// Note we don't correctly cleanup results of the spawned tasks, causing - /// resource leaks. Before using this function in more places, - /// we must implement a proper cleanup mechanism. + /// The `name` label is logged during shutdown to identify slow tasks. #[inline(always)] - pub fn spawn_sync(&self, future: F) + pub fn spawn_sync(&self, name: &'static str, future: F) where F: std::future::Future + Send + 'static, F::Output: Send + 'static, { + if let Ok(mut names) = self.active_names.lock() { + names.push(name); + } let subtasks = self.tasks.clone(); - tokio::spawn(spawn_subtask(subtasks, future)); + tokio::spawn(spawn_subtask(subtasks, name, future)); } /// Shutdown all subtasks gracefully. @@ -58,6 +49,7 @@ impl TaskManager { pub fn shutdown(&self) -> Result<(), String> { let cancel = self.cancellation_token.clone(); let subtasks = self.tasks.clone(); + let active_names = self.active_names.clone(); // a bit naive synchronization to wait for shutdown let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); @@ -65,25 +57,76 @@ impl TaskManager { let completed = Arc::new(AtomicUsize::new(0)); let counter = completed.clone(); + let counter_for_timeout = completed.clone(); // we need to run this task in separate task to avoid cancelling it during shutdown tokio::task::spawn(async move { // Cancel all background tasks + tracing::trace!("shutdown: cancelling all tasks"); cancel.cancel(); // Wait for all subtasks to finish within SHUTDOWN_TIMEOUT let tasks_list = subtasks.clone(); - timeout(SHUTDOWN_TIMEOUT, async move { + let names_for_join = active_names.clone(); + let timed_out = timeout(SHUTDOWN_TIMEOUT, async move { let mut tasks = tasks_list.lock().await; + let total = tasks.len(); + tracing::trace!(total, "shutdown: joining tasks"); + let start = std::time::Instant::now(); while let Some(handle) = tasks.join_next().await { - if let Err(e) = handle { - tracing::error!("Subtask failed: {:?}", e); + let i = counter.fetch_add(1, 
std::sync::atomic::Ordering::Relaxed) + 1; + match &handle { + Ok(name) => { + // Remove one instance of this name from active list + if let Ok(mut names) = names_for_join.lock() + && let Some(pos) = names.iter().position(|n| *n == *name) + { + names.swap_remove(pos); + } + tracing::trace!( + task = name, + task_num = i, + total, + elapsed_ms = start.elapsed().as_millis() as u64, + "shutdown: task joined OK" + ); + } + Err(e) => tracing::trace!( + task_num = i, + total, + elapsed_ms = start.elapsed().as_millis() as u64, + error = %e, + "shutdown: task joined with error" + ), } - counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed); } }) - .await - .ok(); // ignore output as we are shutting down anyway + .await; + + if timed_out.is_err() { + let done = counter_for_timeout.load(std::sync::atomic::Ordering::Relaxed); + let remaining: Vec<&str> = + active_names.lock().map(|n| n.clone()).unwrap_or_default(); + tracing::trace!( + completed = done, + remaining_count = remaining.len(), + remaining_tasks = ?remaining, + "shutdown: timed out waiting for tasks, aborting remaining" + ); + + #[cfg(tokio_unstable)] + { + let handle = tokio::runtime::Handle::current(); + let dump = handle.dump().await; + for (i, task) in dump.tasks().iter().enumerate() { + tracing::trace!( + task_num = i, + trace = %task.trace(), + "shutdown: active tokio task" + ); + } + } + } // now abort all tasks subtasks.lock().await.shutdown().await; @@ -114,13 +157,19 @@ impl TaskManager { } #[inline(always)] -async fn spawn_subtask(subtasks: Arc>>, future: F) -where +async fn spawn_subtask( + subtasks: Arc>>, + name: &'static str, + future: F, +) where F: std::future::Future + Send + 'static, F::Output: Send + 'static, { let mut subtasks_lock = subtasks.lock().await; - subtasks_lock.spawn(future); + subtasks_lock.spawn(async move { + future.await; + name + }); } impl Default for TaskManager {