diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index c2afccae66..3f24cd169d 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -57,6 +57,11 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Patch non-fast-block node + run: | + perl -0777 -i -pe 's|7 \* 24 \* 60 \* 60 / 12 // 7 days|5 // Only 5 blocks for tests|' runtime/src/lib.rs + perl -0777 -i -pe 's|pub fn DefaultPendingCooldown\(\) -> u64 \{\s*if cfg!\(feature = "fast-blocks"\) \{\s*return 15;\s*\}\s*7_200\s*\}|pub fn DefaultPendingCooldown() -> u64 {\n 15\n }|g' pallets/subtensor/src/lib.rs + - name: Build and push Docker image uses: docker/build-push-action@v6 with: diff --git a/.github/workflows/label-triggers.yml b/.github/workflows/label-triggers.yml index f3c330f85c..bcf43e4c23 100644 --- a/.github/workflows/label-triggers.yml +++ b/.github/workflows/label-triggers.yml @@ -25,4 +25,4 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, body: '@opentensor/cerebrum / @opentensor/gyrus / @opentensor/cortex breaking change detected! Please prepare accordingly!' 
- }) + }) \ No newline at end of file diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml index 71f69fcd75..6040485eca 100644 --- a/.github/workflows/run-benchmarks.yml +++ b/.github/workflows/run-benchmarks.yml @@ -3,6 +3,9 @@ name: Validate-Benchmarks on: pull_request: + types: + - opened + - synchronize workflow_dispatch: concurrency: @@ -11,36 +14,105 @@ concurrency: jobs: validate-benchmarks: - if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-validate-benchmarks') }} - runs-on: SubtensorCI + runs-on: Benchmarking + + env: + SKIP_BENCHMARKS: '0' + steps: - - name: Checkout PR branch + - name: Check out PR branch + if: ${{ env.SKIP_BENCHMARKS != '1' }} uses: actions/checkout@v4 with: ref: ${{ github.head_ref }} fetch-depth: 0 + - name: Install GitHub CLI + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + sudo apt-get update + sudo apt-get install -y gh + echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + - name: Install system dependencies + if: ${{ env.SKIP_BENCHMARKS != '1' }} run: | sudo apt-get update sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." 
+ echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + - name: Install Rust toolchain + if: ${{ env.SKIP_BENCHMARKS != '1' }} uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + - name: Cache Rust build + if: ${{ env.SKIP_BENCHMARKS != '1' }} uses: Swatinem/rust-cache@v2 with: key: bench-${{ hashFiles('**/Cargo.lock') }} + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." + echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + - name: Build node with benchmarks + if: ${{ env.SKIP_BENCHMARKS != '1' }} run: | cargo build --profile production -p node-subtensor --features runtime-benchmarks + - name: Check skip label + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label found — skipping benchmarks." 
+ echo "SKIP_BENCHMARKS=1" >> "$GITHUB_ENV" + fi + - name: Run & validate benchmarks + if: ${{ env.SKIP_BENCHMARKS != '1' }} run: | chmod +x scripts/benchmark_action.sh ./scripts/benchmark_action.sh + + - name: Check skip label after run + if: ${{ env.SKIP_BENCHMARKS != '1' }} + run: | + labels=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + if echo "$labels" | grep -q "skip-validate-benchmarks"; then + echo "skip-validate-benchmarks label was found — but benchmarks already ran." + fi diff --git a/Cargo.lock b/Cargo.lock index 65578627f9..b0c56ffb2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2364,7 +2364,7 @@ checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fc-api" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "async-trait", "fp-storage", @@ -2373,10 +2373,26 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "fc-aura" +version = "1.0.0-dev" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" +dependencies = [ + "fc-rpc", + "fp-storage", + "sc-client-api", + "sc-consensus-aura", + "sp-api", + "sp-consensus-aura", + "sp-inherents", + "sp-runtime", + "sp-timestamp", +] + [[package]] name = "fc-consensus" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "async-trait", "fp-consensus", @@ -2392,7 +2408,7 @@ dependencies = [ [[package]] name = "fc-db" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = 
"git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "async-trait", "ethereum", @@ -2422,7 +2438,7 @@ dependencies = [ [[package]] name = "fc-mapping-sync" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fc-db", "fc-storage", @@ -2445,7 +2461,7 @@ dependencies = [ [[package]] name = "fc-rpc" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2468,7 +2484,6 @@ dependencies = [ "rand", "rlp", "sc-client-api", - "sc-consensus-aura", "sc-network", "sc-network-sync", "sc-rpc", @@ -2482,7 +2497,6 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", "sp-core", "sp-externalities 0.29.0", "sp-inherents", @@ -2490,7 +2504,6 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-storage 21.0.0", - "sp-timestamp", "substrate-prometheus-endpoint", "thiserror", "tokio", @@ -2499,7 +2512,7 @@ dependencies = [ [[package]] name = "fc-rpc-core" version = "1.1.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2508,13 +2521,13 @@ dependencies = [ "rustc-hex", "serde", "serde_json", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] 
[[package]] name = "fc-storage" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2670,7 +2683,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", ] @@ -2697,7 +2710,7 @@ dependencies = [ [[package]] name = "fp-account" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "hex", "impl-serde", @@ -2716,7 +2729,7 @@ dependencies = [ [[package]] name = "fp-consensus" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "parity-scale-codec", @@ -2727,7 +2740,7 @@ dependencies = [ [[package]] name = "fp-ethereum" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2739,7 +2752,7 @@ dependencies = [ [[package]] name = "fp-evm" version = "3.0.0-dev" -source = 
"git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "evm", "frame-support", @@ -2754,7 +2767,7 @@ dependencies = [ [[package]] name = "fp-rpc" version = "3.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -2770,7 +2783,7 @@ dependencies = [ [[package]] name = "fp-self-contained" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-support", "parity-scale-codec", @@ -2782,7 +2795,7 @@ dependencies = [ [[package]] name = "fp-storage" version = "2.0.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "parity-scale-codec", "serde", @@ -2797,7 +2810,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-support-procedural", @@ -2821,7 +2834,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "43.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "array-bytes", @@ -2871,7 +2884,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aquamarine", "frame-support", @@ -2901,7 +2914,7 @@ dependencies = [ [[package]] name = "frame-metadata-hash-extension" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "docify", @@ -2915,8 +2928,8 @@ dependencies = [ [[package]] name = "frame-support" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.2.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aquamarine", "array-bytes", @@ -2939,7 +2952,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-crypto-hashing-proc-macro", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-genesis-builder", "sp-inherents", "sp-io", @@ -2947,7 +2960,7 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-weights", "static_assertions", @@ -2956,8 +2969,8 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "30.0.3" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "30.0.6" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "cfg-expr", @@ -2970,7 +2983,7 @@ dependencies = [ "proc-macro-warning 1.0.2", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 2.0.90", ] @@ -2990,7 +3003,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.2.0", @@ -3013,7 +3026,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "12.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro2", "quote", @@ -3023,7 +3036,7 @@ dependencies = [ [[package]] name = "frame-system" version = "38.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "cfg-if", "docify", @@ -3035,7 +3048,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-version", "sp-weights", ] @@ -3043,7 +3056,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -3057,7 +3070,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "parity-scale-codec", @@ -3067,7 +3080,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "parity-scale-codec", @@ -3696,7 +3709,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -5526,6 
+5539,7 @@ dependencies = [ "async-trait", "clap", "fc-api", + "fc-aura", "fc-consensus", "fc-db", "fc-mapping-sync", @@ -5669,7 +5683,7 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "sp-tracing 17.0.1", "sp-transaction-pool", @@ -6004,7 +6018,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-weights", "substrate-fixed", @@ -6014,7 +6028,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6030,7 +6044,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6042,8 +6056,8 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "39.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", 
"frame-benchmarking", @@ -6058,7 +6072,7 @@ dependencies = [ [[package]] name = "pallet-base-fee" version = "1.0.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "frame-support", @@ -6082,7 +6096,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", ] @@ -6107,7 +6121,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", "tle", "w3f-bls", @@ -6120,6 +6134,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "pallet-preimage", "parity-scale-codec", @@ -6127,7 +6142,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", ] @@ -6167,7 +6182,7 @@ dependencies = [ [[package]] name = "pallet-ethereum" version = "4.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "ethereum", "ethereum-types", @@ -6189,7 +6204,7 @@ dependencies = [ [[package]] name = "pallet-evm" version = "6.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" 
+source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "environmental", "evm", @@ -6212,7 +6227,7 @@ dependencies = [ [[package]] name = "pallet-evm-chain-id" version = "1.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-support", "frame-system", @@ -6223,7 +6238,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-modexp" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "num", @@ -6232,7 +6247,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-sha3fips" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "tiny-keccak", @@ -6241,7 +6256,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-simple" version = "2.0.0-dev" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "fp-evm", "ripemd", @@ -6251,7 +6266,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" 
dependencies = [ "frame-benchmarking", "frame-support", @@ -6273,7 +6288,7 @@ dependencies = [ [[package]] name = "pallet-hotfix-sufficients" version = "1.0.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6288,7 +6303,7 @@ dependencies = [ [[package]] name = "pallet-insecure-randomness-collective-flip" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6301,7 +6316,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6317,7 +6332,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6332,7 +6347,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6365,7 +6380,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -6389,14 +6404,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-macros", ] [[package]] name = "pallet-root-testing" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6410,15 +6425,15 @@ dependencies = [ [[package]] name = "pallet-safe-mode" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", - "pallet-proxy 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - "pallet-utility 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "pallet-proxy 38.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", + "pallet-utility 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -6428,7 +6443,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6445,7 +6460,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6503,7 +6518,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-tracing 17.0.1", "sp-version", "substrate-fixed", @@ -6515,7 +6530,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6530,7 +6545,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-benchmarking", @@ -6548,8 +6563,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-support", "frame-system", @@ -6564,7 +6579,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "41.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6580,7 +6595,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6611,7 +6626,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-benchmarking", "frame-support", @@ -7075,7 +7090,7 @@ dependencies = [ [[package]] name = "precompile-utils" version = "0.1.0" 
-source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "environmental", "evm", @@ -7099,14 +7114,14 @@ dependencies = [ [[package]] name = "precompile-utils-macro" version = "0.1.0" -source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" +source = "git+https://github.com/opentensor/frontier?rev=cd6bca14a3#cd6bca14a366cc7cb1c3b1b1d7bc8213667e4126" dependencies = [ "case", "num_enum", "prettyplease 0.2.22", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 1.0.109", ] @@ -8091,7 +8106,7 @@ name = "safe-math" version = "0.1.0" dependencies = [ "num-traits", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "substrate-fixed", ] @@ -8125,7 +8140,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "sp-core", @@ -8136,7 +8151,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "futures-timer", @@ 
-8158,7 +8173,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.42.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "sp-api", @@ -8173,7 +8188,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "docify", @@ -8189,7 +8204,7 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-genesis-builder", "sp-io", "sp-runtime", @@ -8200,7 +8215,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "12.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", @@ -8211,7 +8226,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.47.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "chrono", @@ -8252,7 +8267,7 @@ dependencies = [ [[package]] name = 
"sc-client-api" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "fnv", "futures", @@ -8278,8 +8293,8 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hash-db", "kvdb", @@ -8305,7 +8320,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8329,7 +8344,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8358,7 +8373,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "fork-tree", @@ -8383,7 +8398,7 @@ dependencies = [ "sp-consensus-babe", 
"sp-consensus-slots", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-inherents", "sp-keystore", "sp-runtime", @@ -8394,7 +8409,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8407,7 +8422,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "array-bytes", @@ -8441,7 +8456,7 @@ dependencies = [ "sp-consensus", "sp-consensus-grandpa", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", @@ -8451,7 +8466,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "finality-grandpa", "futures", @@ -8471,7 +8486,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.46.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "assert_matches", "async-trait", @@ -8506,7 +8521,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -8529,7 +8544,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.40.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -8552,7 +8567,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "polkavm", "sc-allocator", @@ -8565,7 +8580,7 @@ dependencies = [ [[package]] name = "sc-executor-polkavm" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "polkavm", @@ -8576,7 +8591,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.35.0" 
-source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "anyhow", "cfg-if", @@ -8594,7 +8609,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "console", "futures", @@ -8611,7 +8626,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "33.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "parking_lot 0.12.3", @@ -8625,7 +8640,7 @@ dependencies = [ [[package]] name = "sc-mixnet" version = "0.15.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "arrayvec", @@ -8653,8 +8668,8 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.45.6" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8705,7 +8720,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.44.0" 
-source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -8723,7 +8738,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "futures", @@ -8741,8 +8756,8 @@ dependencies = [ [[package]] name = "sc-network-light" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8762,8 +8777,8 @@ dependencies = [ [[package]] name = "sc-network-sync" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "async-channel", @@ -8799,8 +8814,8 @@ dependencies = [ [[package]] name = "sc-network-transactions" -version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "0.44.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "futures", @@ -8819,7 +8834,7 
@@ dependencies = [ [[package]] name = "sc-network-types" version = "0.12.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bs58 0.5.1", "ed25519-dalek", @@ -8836,7 +8851,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "40.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bytes", @@ -8870,7 +8885,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.18.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -8879,7 +8894,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "40.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "jsonrpsee", @@ -8911,7 +8926,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.44.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -8930,8 +8945,8 @@ 
dependencies = [ [[package]] name = "sc-rpc-server" -version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "17.1.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "dyn-clone", "forwarded-header-value", @@ -8955,7 +8970,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.45.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "futures", @@ -8987,7 +9002,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.46.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "directories", @@ -9051,7 +9066,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.36.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "log", "parity-scale-codec", @@ -9062,7 +9077,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "derive_more", "futures", @@ -9075,15 
+9090,15 @@ dependencies = [ "serde", "serde_json", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] name = "sc-telemetry" version = "25.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "chrono", "futures", @@ -9103,7 +9118,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "37.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "chrono", "console", @@ -9132,7 +9147,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", @@ -9143,7 +9158,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" 
dependencies = [ "async-trait", "futures", @@ -9159,7 +9174,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-runtime", "sp-tracing 17.0.1", "sp-transaction-pool", @@ -9170,7 +9185,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -9186,7 +9201,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-channel", "futures", @@ -9632,7 +9647,7 @@ name = "share-pool" version = "0.1.0" dependencies = [ "safe-math", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "substrate-fixed", ] @@ -9778,7 +9793,7 @@ dependencies = [ [[package]] name = "sp-api" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "hash-db", @@ -9800,7 +9815,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "20.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "blake2 0.10.6", @@ -9814,7 +9829,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -9826,7 +9841,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "integer-sqrt", @@ -9849,7 +9864,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-inherents", @@ -9859,7 +9874,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "37.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "futures", "parity-scale-codec", @@ -9878,7 +9893,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.40.0" -source 
= "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "futures", @@ -9893,7 +9908,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -9909,7 +9924,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -9927,7 +9942,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "21.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "finality-grandpa", "log", @@ -9944,7 +9959,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.40.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -9955,7 +9970,7 @@ dependencies = [ [[package]] name = "sp-core" version = 
"34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bitflags 1.3.2", @@ -9984,11 +9999,11 @@ dependencies = [ "secp256k1", "secrecy", "serde", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-runtime-interface 28.0.0", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "ss58-registry", "substrate-bip39", @@ -10021,7 +10036,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.14.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -10055,7 +10070,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "blake2b_simd", "byteorder", @@ -10068,17 +10083,17 @@ dependencies = [ [[package]] 
name = "sp-crypto-hashing-proc-macro" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "syn 2.0.90", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "kvdb", "parking_lot 0.12.3", @@ -10087,7 +10102,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "proc-macro2", "quote", @@ -10117,7 +10132,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "environmental", "parity-scale-codec", @@ -10127,7 +10142,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.15.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10139,7 +10154,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10151,8 +10166,8 @@ dependencies = [ [[package]] name = "sp-io" -version = "38.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "38.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bytes", "docify", @@ -10164,7 +10179,7 @@ dependencies = [ "rustversion", "secp256k1", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-keystore", "sp-runtime-interface 28.0.0", @@ -10178,7 +10193,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-core", "sp-runtime", @@ -10188,7 +10203,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.40.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -10199,7 +10214,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "thiserror", "zstd 0.12.4", @@ -10208,7 +10223,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.7.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -10218,7 +10233,7 @@ dependencies = [ [[package]] name = "sp-mixnet" version = "0.12.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10229,7 +10244,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-core", @@ -10239,7 +10254,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "13.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "backtrace", "lazy_static", @@ -10249,7 +10264,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "32.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "rustc-hash 1.1.0", "serde", @@ -10258,8 +10273,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "39.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "39.0.5" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "either", @@ -10277,7 +10292,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-weights", "tracing", ] @@ -10304,7 +10319,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10313,7 +10328,7 @@ dependencies = [ "primitive-types", "sp-externalities 0.29.0", "sp-runtime-interface-proc-macro 18.0.0", - "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-storage 21.0.0", "sp-tracing 17.0.1", "sp-wasm-interface 21.0.1", @@ -10336,7 +10351,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "18.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "expander", @@ -10349,7 +10364,7 @@ dependencies = [ [[package]] name = "sp-session" version = "36.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "scale-info", @@ -10363,7 +10378,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "36.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10376,7 +10391,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.43.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hash-db", "log", @@ -10396,7 +10411,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "18.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "aes-gcm", "curve25519-dalek", @@ -10409,7 +10424,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-externalities 0.29.0", "sp-runtime", "sp-runtime-interface 28.0.0", @@ -10420,7 +10435,7 @@ dependencies = [ [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" [[package]] name = "sp-std" @@ -10442,19 +10457,19 @@ dependencies = [ [[package]] name = "sp-storage" version = "21.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] name = "sp-timestamp" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" 
dependencies = [ "async-trait", "parity-scale-codec", @@ -10477,7 +10492,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "17.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "tracing", @@ -10488,7 +10503,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "sp-api", "sp-runtime", @@ -10497,7 +10512,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "34.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "async-trait", "parity-scale-codec", @@ -10511,7 +10526,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "ahash 0.8.11", "hash-db", @@ -10534,7 +10549,7 @@ dependencies = [ [[package]] name = "sp-version" version = "37.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10543,7 +10558,7 @@ dependencies = [ "serde", "sp-crypto-hashing-proc-macro", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "sp-version-proc-macro", "thiserror", ] @@ -10551,7 +10566,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10573,7 +10588,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "21.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -10585,7 +10600,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "31.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -10593,7 +10608,7 @@ dependencies = [ "serde", "smallvec", "sp-arithmetic", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", ] [[package]] @@ -10773,8 +10788,8 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staging-xcm" -version = "14.2.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "14.2.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "bounded-collections", @@ -10894,7 +10909,7 @@ dependencies = [ [[package]] name = "substrate-bip39" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "hmac 0.12.1", "pbkdf2", @@ -10906,7 +10921,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" [[package]] name = "substrate-fixed" @@ -10922,7 +10937,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "39.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "docify", "frame-system-rpc-runtime-api", @@ -10942,7 +10957,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "http-body-util", "hyper 1.5.0", @@ -10955,8 +10970,8 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" -version = "24.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +version = "24.0.2" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "array-bytes", "build-helper", @@ -11063,7 +11078,7 @@ dependencies = [ "precompile-utils", "sp-core", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7)", "subtensor-runtime-common", ] @@ -12707,7 +12722,7 @@ dependencies = [ [[package]] name = "xcm-procedural" version = "10.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409-7#0c9644766f02c872f51e5b72adf1051189fe9126" dependencies = [ "Inflector", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 08cdfbf91e..548bc5af63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,127 +100,128 @@ approx = "0.5" subtensor-macros = { path = "support/macros" } -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", 
default-features = false } -frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +frame-try-runtime = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409-7", default-features = false } pallet-proxy = { path = "pallets/proxy", default-features = false } -pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409-7", default-features = false } pallet-utility = { path = "pallets/utility", default-features = false } -pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } 
-sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-offchain = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = 
false } -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409-7" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-transaction-pool = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } substrate-fixed = { git = "https://github.com/opentensor/substrate-fixed.git", tag = "v0.5.9" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } -substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7" } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", 
default-features = false } # Frontier -fp-evm = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false, features = [ +fp-evm = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-rpc = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-self-contained = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false, features = [ "serde", ] } -fp-account = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-storage = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-db = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-consensus = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-api = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false, features = [ +fp-account = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-storage = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-db = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-consensus = { git = 
"https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-consensus = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fp-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-api = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-rpc = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false, features = [ "rpc-binary-search-estimate", ] } -fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } +fc-rpc-core = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-aura = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +fc-mapping-sync = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +precompile-utils = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } # Frontier FRAME -pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } 
-pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } -pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "635bdac882", default-features = false } +pallet-base-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-dynamic-fee = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-ethereum = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-chain-id = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-modexp = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-sha3fips = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-evm-precompile-simple = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } +pallet-hotfix-sufficients = { git = "https://github.com/opentensor/frontier", rev = "cd6bca14a3", default-features = false } #DRAND pallet-drand = { path = "pallets/drand", default-features = false } -sp-crypto-ec-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", features = [ +sp-crypto-ec-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", features = [ "bls12-381", ] } getrandom = { version = "0.2.15", features = [ 
"custom", ], default-features = false } -sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409-7", default-features = false } w3f-bls = { version = "=0.1.3", default-features = false } ark-crypto-primitives = { version = "0.4.0", default-features = false, features = [ "r1cs", @@ -268,4 +269,4 @@ runtime-benchmarks = [ "node-subtensor-runtime/runtime-benchmarks", ] metadata-hash = ["node-subtensor-runtime/metadata-hash"] -pow-faucet = [] \ No newline at end of file +pow-faucet = [] diff --git a/Dockerfile b/Dockerfile index 44e4bbe12f..447ed98b5e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,14 @@ -ARG BASE_IMAGE=rust:1.83 -FROM $BASE_IMAGE AS base_builder +# ------------------------------------------------------------------------------ +# Subtensor Dockerfile (hardened) +# – Builds production and local binaries +# – Final runtime images run as non-root `subtensor` user (UID/GID 10001) +# ------------------------------------------------------------------------------ + +############################################################################### +# ---------- 1. 
Common build environment ------------------------------------- +############################################################################### +ARG BASE_IMAGE=rust:latest +FROM ${BASE_IMAGE} AS base_builder LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.vendor="Opentensor Foundation" \ @@ -7,58 +16,88 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ ai.opentensor.image.documentation="https://docs.bittensor.com" -RUN rustup update stable -RUN rustup target add wasm32-unknown-unknown --toolchain stable - +# Rust targets +RUN rustup update stable && \ + rustup target add wasm32-unknown-unknown --toolchain stable -# Set up Rust environment +# Build prerequisites ENV RUST_BACKTRACE=1 -RUN apt-get update && apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev -RUN rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl build-essential protobuf-compiler clang git pkg-config libssl-dev && \ + rm -rf /var/lib/apt/lists/* -# Copy entire repository +# Copy entire repository once for all build stages (maximises cache hits) COPY . /build WORKDIR /build -# -# Image for building prod -# +############################################################################### +# ---------- 2. 
Production build stage --------------------------------------- +############################################################################### FROM base_builder AS prod_builder -# Build the project -RUN cargo build -p node-subtensor --profile production --features="metadata-hash" --locked -# Verify the binary was produced -RUN test -e /build/target/production/node-subtensor -EXPOSE 30333 9933 9944 -# -# Final prod image -# -FROM $BASE_IMAGE AS subtensor -# Copy all chainspec files -COPY --from=prod_builder /build/*.json / -COPY --from=prod_builder /build/chainspecs/*.json / -# Copy final binary -COPY --from=prod_builder /build/target/production/node-subtensor /usr/local/bin +# Build the production binary (profile defined in Cargo.toml) +RUN cargo build -p node-subtensor --profile production --features "metadata-hash" --locked \ + && test -e /build/target/production/node-subtensor # sanity-check +############################################################################### +# ---------- 3. 
Final production image (hardened) ---------------------------- +############################################################################### +FROM ${BASE_IMAGE} AS subtensor + +# ---- security hardening: create least-privilege user ---- +RUN addgroup --system --gid 10001 subtensor && \ + adduser --system --uid 10001 --gid 10001 --home /home/subtensor --disabled-password subtensor + +# Writable data directory to be used as --base-path +RUN mkdir -p /data && chown -R subtensor:subtensor /data + +# Workdir for the non-root user +WORKDIR /home/subtensor + +# Copy chainspecs and binary with correct ownership +COPY --chown=subtensor:subtensor --from=prod_builder /build/*.json ./ +COPY --chown=subtensor:subtensor --from=prod_builder /build/chainspecs/*.json ./chainspecs/ +COPY --from=prod_builder /build/target/production/node-subtensor /usr/local/bin/ +RUN chown subtensor:subtensor /usr/local/bin/node-subtensor -# -# Image for building local -# -FROM base_builder AS local_builder -# Build the project -RUN cargo build --workspace --profile release --features="pow-faucet" -# Verify the binary was produced -RUN test -e /build/target/release/node-subtensor EXPOSE 30333 9933 9944 +USER subtensor +ENTRYPOINT ["node-subtensor"] +CMD ["--base-path","/data"] +############################################################################### +# ---------- 4. Local build stage -------------------------------------------- +############################################################################### +FROM base_builder AS local_builder + +# Build the workspace in release mode with the pow-faucet feature +RUN cargo build --workspace --profile release --features "pow-faucet" \ + && test -e /build/target/release/node-subtensor # sanity-check + +############################################################################### +# ---------- 5. 
Final local image (hardened) ---------------------------------- +############################################################################### +FROM ${BASE_IMAGE} AS subtensor-local + +# Least-privilege user +RUN addgroup --system --gid 10001 subtensor && \ + adduser --system --uid 10001 --gid 10001 --home /home/subtensor --disabled-password subtensor -# -# Final local image -# -FROM $BASE_IMAGE AS subtensor-local -# Copy all chainspec files -COPY --from=local_builder /build/*.json / -COPY --from=local_builder /build/chainspecs/*.json / -# Copy final binary -COPY --from=local_builder /build/target/release/node-subtensor /usr/local/bin -RUN "node-subtensor" build-spec --disable-default-bootnode --raw --chain local > /localnet.json +RUN mkdir -p /data && chown -R subtensor:subtensor /data +WORKDIR /home/subtensor + +# Copy artifacts +COPY --chown=subtensor:subtensor --from=local_builder /build/*.json ./ +COPY --chown=subtensor:subtensor --from=local_builder /build/chainspecs/*.json ./chainspecs/ +COPY --from=local_builder /build/target/release/node-subtensor /usr/local/bin/ +RUN chown subtensor:subtensor /usr/local/bin/node-subtensor + +# Generate a local chainspec for convenience (run as root before user switch) +RUN node-subtensor build-spec --disable-default-bootnode --raw --chain local > /localnet.json \ + && chown subtensor:subtensor /localnet.json + +EXPOSE 30333 9933 9944 +USER subtensor +ENTRYPOINT ["node-subtensor"] +CMD ["--base-path","/data","--chain","/localnet.json"] diff --git a/docs/consensus.md b/docs/consensus.md index 881b465b48..c3a04c380f 100644 --- a/docs/consensus.md +++ b/docs/consensus.md @@ -17,6 +17,8 @@ Community oversight (as in Steemit) must identify wrongful downvoting, but only High-volume, on-demand generative content (as in Bittensor) demands automated evaluation and divide-and-conquer validation, but introduces subjectivity both in the automated value measures and mutually exclusive task subsets across subnet validators. 
A coalition of validators can collude to skew scoring of subnet servers in their favour, which is harder to detect because of the inherent subjectivity. Existing consensus mechanisms will fail to deter reward manipulation for such high-volume subjective utility networks, so the need for a more sophisticated consensus arises. +--- + ### Consensus Mechanism Yuma Consensus guarantees long-term network honesty despite persistent adversarial presence in high-volume subjective utility networks. It directly penalizes selfish scoring by down-correction to the majority consensus and slashing of cabal voting stake, and also penalizes low-scoring of honest servers via forfeited validator rewards when cabals don’t score at consensus. @@ -31,6 +33,8 @@ Yuma Consensus is adversarially-resilient when majority stake is honest, via sta **Cabal sets high self-weight**: Cabal servers with poor utility will receive low weights from majority stake, and high self-weight from minority cabals will then get reduced to the low consensus. This means that minority cabals lose voting power as penalty for unfair voting while still receiving low consensus weight despite high self-weight. This consensus mechanism thus protects against selfish weighting if the majority stake is honest. +--- + ### Game-theoretic framework #### Preliminaries @@ -112,6 +116,64 @@ let mut ema_bonds: Vec> = mat_ema( &bonds_delta, &bonds, alpha ); / let mut dividends: Vec = inplace_normalize(matmul_transpose( &ema_bonds, &incentive )); // Validator reward ``` +--- + +### Monte Carlo simulations + +We consider a two-team game between (protagonist) honest stake ($0.5< S_H\le 1$) and (adversarial) cabal stake ($1 - S_H$), with $|H|$ honest and $|C|$ cabal players, that have $S_H = \sum_{i\in H}S_i$ honest stake and $1-S_H = \sum_{i\in C}S_i$ cabal stake. 
+ +#### Network sizing + +A network size of $N=|H|+|C|=(|H_V|+|H_S|)+(|C_V|+|C_S|)=512$ and validator count of $|H_V|+|C_V|=64$ is considered for consensus guarantee experiments, and the honest/cabal ratio $|H|/N=S_H$ reflects the honest stake ratio $S_H$, but modifying extremes to ensure that each subset has at least one validator and at least one server. + +#### Stake sampling + +For the Monte Carlo simulations we use Gaussian distributions for stake and weight assignments, and ensure that the honest/cabal ratios are met. Note that stake is only assigned to validator nodes $H_V$ and $C_V$ and not servers. + +Firstly, we sample initial validator ($i\in H_V\cup C_V$) stake values $S'_i \sim \mathcal{N}(1,\sigma_S^{2})$ with a typical $\sigma_S=0.3$ standard deviation, followed by clamping to avoid negative stake: + +$$S'_i = \begin{cases} +x & \text{if } x \sim \mathcal{N}(1, \sigma_S^2), x \ge 0 \\ +0 & \text{if } x \sim \mathcal{N}(1, \sigma_S^2), x < 0 +\end{cases}$$ + +Then we normalize each honest/cabal subset and multiply by its stake proportion, which thus gives an overall normalized stake and the correct stake ratio for each subset: + +$$S_{i\in H_V} = S_H \cdot S'\_i \left/ \sum_{k\in H_V} S'\_k\right.\qquad\qquad S_{i\in C_V} = (1-S_H)\cdot S'\_i \left/ \sum_{k\in C_V}S'\_k\right.$$ + +#### Weight sampling + +Similarly, we randomize the weights that validators $H_V,C_V$ set on servers $H_S,C_S$. +Specifically, honest players $i\in H$ set $W_H = \sum_{j\in H}W_{ij}$ self-weight and $1-W_H = \sum_{j\in C}W_{ij}$ weight on cabal players, while cabal players $i\in C$ set $W_C = \sum_{j\in C}W_{ij}$ self-weight and $1-W_C = \sum_{j\in H}W_{ij}$ weight on honest players. 
+
+We firstly sample initial weights $W'_{ij} \sim \mathcal{N}(1,\sigma_W^{2})$ with various standard deviations ranging in $0\le\sigma_W\le0.4$, but then clamping to avoid negative weights:
+
+$$W'_{ij} = \begin{cases}
+x & \text{if } x \sim \mathcal{N}(1, \sigma_W^2), x \geq 0 \\
+0 & \text{if } x \sim \mathcal{N}(1, \sigma_W^2), x < 0
+\end{cases}$$
+
+Weight setting between the two subsets forms quadrants $H_V\rightarrow H_S$, $H_V\rightarrow C_S$, $C_V\rightarrow H_S$, and $C_V\rightarrow C_S$, so we ensure those weight ratios are met by normalizing each weight subset and multiplying by the corresponding quadrant ratio:
+
+$$W_{i\in H_V, j\in H_S} = W_H\cdot W'\_{ij} \left/ \sum_{k\in H_S}W'\_{ik}\right.\qquad\qquad W_{i\in H_V, j\in C_S} = (1-W_H)\cdot W'\_{ij} \left/ \sum_{k\in C_S}W'\_{ik}\right.$$
+
+$$W_{i\in C_V, j\in H_S} = (1-W_C)\cdot W'\_{ij} \left/ \sum_{k\in H_S}W'\_{ik}\right.\qquad\qquad W_{i\in C_V, j\in C_S} = W_C\cdot W'\_{ij} \left/ \sum_{k\in C_S}W'\_{ik}\right.$$
+
+#### Emission calculation
+
+Given the simulation parameters of the network size, validator count, a defined major/honest stake $S_H$, a defined major/honest utility $W_H$, and a defined minor/cabal self-weight $W_C$, we have now instantiated the network with randomly sampled stake and weights and can proceed with an emission calculation.
+
+We calculate the consensus $\overline{W_j} = \arg \max_w \left( \sum_i S_i \cdot \left\lbrace W_{ij} \ge w \right\rbrace \ge \kappa \right)$ for each server $j$, and calculate consensus-clipped weights $\overline{W_{ij}} = \min( W_{ij}, \overline{W_j} )$. This then gives us the adjusted weights that offer a measure of protection against reward manipulation.
+
+To calculate emissions for this epoch, we firstly calculate server rank $R_j = \sum_i S_i \cdot \overline{W_{ij}}$ then incentive $I_j = R_j / \sum_k R_k$, as well as validator bonds $\Delta B_{ij} = S_i \cdot \widetilde{W_{ij}} \left/ \left( \sum_k S_k \cdot \widetilde{W_{kj}} \right) \right.$ and rewards $D_i = \sum_j B_{ij} \cdot I_j$.
+
+Then we add up server incentive and validator bonds over honest nodes to obtain honest emission $E_H = \xi \cdot D_{i\in H} + (1-\xi) \cdot I_{i\in H}$ with a typical validator reward ratio of $\xi=0.5$.
+The objective is to prove major stake retention $S_H\le E_H$ for a single epoch, which by extension proves retention over many epochs due to additive nature of EMA bonds, so we do not bother with validator EMA bonds in these experiments.
+
+The honest objective $S_H\le E_H$ at least retains scoring power $S_H$ over all action transitions in the game, otherwise when $E_H\le S_H$ honest emission will erode to 0 over time, despite a starting condition of $0.5\lt S_H$.
+
+---
+
 ### Consensus guarantees
 
 Yuma Consensus guarantees honest majority stake retention $S_H\le E_H$ even under worst-case adversarial attacks, given sufficiently large honest utility $W_H$. The specific honest stake and utility pairs that delineate the guarantees are complicated by natural variances inside large realistic networks. Therefore, we use extensive random sampling simulations (Monte Carlo studies) of large realistic networks and subject them to varying degrees of adversarial attacks, and calculate comprehensive consensus guarantees under representative conditions.
@@ -124,9 +186,9 @@ The x-axis is major self-weight and the y-axis is minor self-weight, and each co
 Major/honest self-weight $W_H$ is the true honest utility, while minor/cabal self-weight $W_C$ is an arbitrary value a self-serving coalition may self-report.

- - - + + +

To understand how we construct these plots, let us first consider contour plot for a single major/honest stake setting $S_H=0.6$. Here each contour value is the honest emission $E_H$, and we highlight at (1) the specific contour $E_H=0.6$ that matches the honest stake. This means that any weight setting on contour $E_H=S_H=0.6$ will retain honest stake, while any setting to the right of it will grow honest stake. @@ -138,18 +200,20 @@ A compound plot then combines all the highlighted $S_H=E_H$ contours from indivi Retention graphs like these comprehensively capture consensus guarantees across all primary conditions, and we utilize these to analyze the effect of consensus hyperparameters. Subtensor integration tests run Monte Carlo simulations of large realistic networks under adversarial conditions, and constructs retention profiles to confirm consensus guarantees of the actual blockchain implementation. -Retention profiles are reproducible by running [`_map_consensus_guarantees`](../pallets/subtensor/tests/epoch.rs) (decorate with `#[test]`). +Retention profiles are reproducible by running test [`map_consensus_guarantees()`](../pallets/subtensor/src/tests/consensus.rs) and plotting with [`map_consensus.py`](../scripts/map_consensus.py). ```bash -RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 cargo test -- _map_consensus_guarantees --exact --nocapture > consensus.txt +RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 RUSTFLAGS="-C opt-level=3" cargo test --manifest-path=pallets/subtensor/Cargo.toml -- tests::consensus::map_consensus_guarantees --exact --nocapture > consensus.txt + +python scripts/map_consensus.py consensus.txt ``` #### Subjectivity variance Yuma Consensus corrects reward manipulation in subjective utility networks, but the extent of subjectivity influences the exact consensus guarantees. In particular, we expect lower subjectivity to offer improved guarantees since there is stronger consensus. 
However, for higher variance in assigned weights it is easier to hide reward manipulation, we then expect poorer guarantees.

- - - + + +

We assume normally distributed weights originating from a particular side, either honest or cabal, then we modify the weight deviation magnitude $\sigma(W)$ in terms of the mean weight $\mu(W)$. @@ -167,9 +231,9 @@ Increasing $\kappa$ demands greater honest stake, e.g. when $\kappa=0.6$ there i Hence $\kappa=0.5$ is typically the most sensible setting.

- - - + + +

#### Bonds penalty (β) @@ -179,9 +243,9 @@ Lower-stake validators may experience lower service priority, which can result i Full bonds penalty $\beta=1$ may not be desired, due to the presence of non-adversarial cases like these.

- - - + + +

We expect that greater bonds penalty will penalize out-of-consensus validators more, which means less emission going to cabals. Comprehensive simulation with $\beta = 0$, $0.5$, and $1$ respectively show 78%, 76%, and 73% honest utility requirement. This confirms the expectation, that greater bonds penalty means greater inflation going to the honest majority. @@ -191,10 +255,110 @@ Subnet servers need incentive to deliver high utility, and subnet validators nee We expect that more emission going to validators will improve security guarantees, since self-serving validation can then be economically disincentivized.

- - - + + +

We set validation reward ratio at $\xi=0$, $0.25$, and $0.5$ and respectively observe 82%, 78%, 73% honest utility requirement for 60% honest stake preservation. -This means that network security improves as the validation reward ratio is increased, although a significant server incentive ratio still needs to be maintained to ensure overall high utility. \ No newline at end of file +This means that network security improves as the validation reward ratio is increased, although a significant server incentive ratio still needs to be maintained to ensure overall high utility. + +--- + +### Reproduce Consensus Plots (Runpod) + +This guide demonstrates how to reproduce consensus retention profile plots on a minimal Runpod CPU instance. + +#### 1. Deploy Runpod Instance + +Navigate to https://www.runpod.io/console/deploy and select the following: + +* **Pod Type:** CPU Pod, CPU5 (5.7 GHz • DDR5 RAM • NVMe) or equivalent. +* **Instance Configuration:** Compute-Optimized ($0.07/hr, 2 vCPUs, 4GB RAM). + +**Important:** Edit the template and set "Container Disk (Temporary)" to 20GB. This ensures sufficient disk space for the process. + +Retrieve the connection details, including the SSH command and port, under "Connect" -> "SSH over exposed TCP". You can optionally enable Jupyter access (`8888:localhost:8888`) if desired. Connect to your instance via SSH: + +```bash +ssh -L 8888:localhost:8888 root@ -p -i ~/.ssh/id_ed25519 # Replace placeholders +``` + +#### 2. Set up the Environment + +1. **Start a `tmux` session for persistence:** + + ```bash + tmux + ``` + +2. 
**Update system packages and install prerequisites (Python, Rust, and dependencies):** + + ```bash + sudo apt-get update && sudo apt install -y build-essential clang curl git make libssl-dev llvm libudev-dev protobuf-compiler python3 python3-pip \ + && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ + && source ~/.cargo/env && rustup default stable && rustup update \ + && rustup target add wasm32-unknown-unknown \ + && rustup toolchain install nightly \ + && rustup target add --toolchain nightly wasm32-unknown-unknown + + ``` + +3. **Clone the Subtensor repository and checkout the relevant branch:** + + ```bash + git clone https://github.com/opentensor/subtensor.git + cd subtensor + git checkout main + + ``` + + +#### 3. Simulate Networks and Generate Data + +The Subtensor integration tests simulate large, realistic networks under adversarial conditions to generate retention profiles that validate the blockchain's consensus guarantees. Building takes about 10 minutes, and the actual test itself another 15 minutes approximately. + + +```bash +RUST_BACKTRACE=1 SKIP_WASM_BUILD=1 RUSTFLAGS="-C opt-level=3" cargo test --manifest-path=pallets/subtensor/Cargo.toml -- tests::consensus::map_consensus_guarantees --exact --nocapture > consensus.txt +``` +This command runs the `map_consensus_guarantees` test and saves the output to `consensus.txt`. Replace `` with a float e.g. 1.0 (100% bonds penalty). + +#### 4. Generate Contour Plots + +1. **Create a Python virtual environment and install necessary libraries:** + + ```bash + python3 -m venv .venv + source .venv/bin/activate + pip install numpy matplotlib jupyterlab + + ``` + +2. **Run the plotting script:** + + ```bash + python3 scripts/map_consensus.py consensus.txt + ``` + This generates an SVG file named `consensus_plot.svg` in the current directory. + + +#### 5. Explore and Modify (Optional) + +You can use Jupyter-lab to interactively explore and modify the generated plots: + +1. 
**Start Jupyter-lab (on VPS):** + ```bash + jupyter-lab --allow-root --port=8888 + ``` + +2. **Connect to Jupyter:** Open the provided URL (e.g., `http://localhost:8888/tree?token=...`) in your local workstation web browser. + +3. **Modify the plotting script:** Edit `scripts/map_consensus.py` to customize the plots, otherwise download the SVG file. + + +#### Disclaimer + +> This reproduction procedure is provided as a guide and may require adjustments depending on your specific VPS environment and configuration. While every effort has been made to ensure accuracy and completeness, variations in system setup, software versions, or network conditions could affect the results. +> +> Please exercise caution when executing commands with root privileges and ensure you understand the potential implications before proceeding. The author assumes no responsibility for any issues arising from the use of this procedure. If you encounter problems or have suggestions for improvement, please open an issue on this repository. 
diff --git a/evm-tests/package-lock.json b/evm-tests/package-lock.json index 0a4a52bf57..ce2766fb4e 100644 --- a/evm-tests/package-lock.json +++ b/evm-tests/package-lock.json @@ -6,7 +6,6 @@ "": { "license": "ISC", "dependencies": { - "@polkadot-api/descriptors": "file:.papi/descriptors", "@polkadot-labs/hdkd": "^0.0.10", "@polkadot-labs/hdkd-helpers": "^0.0.11", "@polkadot/api": "15.1.1", @@ -16,6 +15,7 @@ "ethers": "^6.13.5", "mocha": "^11.1.0", "polkadot-api": "^1.9.5", + "scale-ts": "^1.6.1", "viem": "2.23.4" }, "devDependencies": { @@ -31,7 +31,8 @@ }, ".papi/descriptors": { "name": "@polkadot-api/descriptors", - "version": "0.1.0-autogenerated.1047499684690955440", + "version": "0.1.0-autogenerated.7914363913476982777", + "extraneous": true, "peerDependencies": { "polkadot-api": "*" } @@ -87,10 +88,266 @@ "node": ">=12" } }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.2.tgz", + "integrity": "sha512-wCIboOL2yXZym2cgm6mlA742s9QeJ8DjGVaL39dLN4rRwrOgOyYSnOaFPhKZGLb2ngj4EyfAFjsNJwPXZvseag==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.2.tgz", + "integrity": "sha512-NQhH7jFstVY5x8CKbcfa166GoV0EFkaPkCKBQkdPJFvo5u+nGXLEH/ooniLb3QI8Fk58YAx7nsPLozUWfCBOJA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.2.tgz", + "integrity": "sha512-5ZAX5xOmTligeBaeNEPnPaeEuah53Id2tX4c2CVP3JaROTH+j4fnfHCkr1PjXMd78hMst+TlkfKcW/DlTq0i4w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + 
], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.2.tgz", + "integrity": "sha512-Ffcx+nnma8Sge4jzddPHCZVRvIfQ0kMsUsCMcJRHkGJ1cDmhe4SsrYIjLUKn1xpHZybmOqCWwB0zQvsjdEHtkg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.2.tgz", + "integrity": "sha512-MpM6LUVTXAzOvN4KbjzU/q5smzryuoNjlriAIx+06RpecwCkL9JpenNzpKd2YMzLJFOdPqBpuub6eVRP5IgiSA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.2.tgz", + "integrity": "sha512-5eRPrTX7wFyuWe8FqEFPG2cU0+butQQVNcT4sVipqjLYQjjh8a8+vUTfgBKM88ObB85ahsnTwF7PSIt6PG+QkA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.2.tgz", + "integrity": "sha512-mLwm4vXKiQ2UTSX4+ImyiPdiHjiZhIaE9QvC7sw0tZ6HoNMjYAqQpGyui5VRIi5sGd+uWq940gdCbY3VLvsO1w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.2.tgz", + "integrity": "sha512-6qyyn6TjayJSwGpm8J9QYYGQcRgc90nmfdUb0O7pp1s4lTY+9D0H9O02v5JqGApUyiHOtkz6+1hZNvNtEhbwRQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + 
"freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.2.tgz", + "integrity": "sha512-UHBRgJcmjJv5oeQF8EpTRZs/1knq6loLxTsjc3nxO9eXAPDLcWW55flrMVc97qFPbmZP31ta1AZVUKQzKTzb0g==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.2.tgz", + "integrity": "sha512-gq/sjLsOyMT19I8obBISvhoYiZIAaGF8JpeXu1u8yPv8BE5HlWYobmlsfijFIZ9hIVGYkbdFhEqC0NvM4kNO0g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.2.tgz", + "integrity": "sha512-bBYCv9obgW2cBP+2ZWfjYTU+f5cxRoGGQ5SeDbYdFCAZpYWrfjjfYwvUpP8MlKbP0nwZ5gyOU/0aUzZ5HWPuvQ==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.2.tgz", + "integrity": "sha512-SHNGiKtvnU2dBlM5D8CXRFdd+6etgZ9dXfaPCeJtz+37PIUlixvlIhI23L5khKXs3DIzAn9V8v+qb1TRKrgT5w==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.2.tgz", + "integrity": "sha512-hDDRlzE6rPeoj+5fsADqdUZl1OzqDYow4TB4Y/3PlKBD0ph1e6uPHzIQcv2Z65u2K0kpeByIyAjCmjn1hJgG0Q==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + 
"os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.2.tgz", + "integrity": "sha512-tsHu2RRSWzipmUi9UBDEzc0nLc4HtpZEI5Ba+Omms5456x5WaNuiG3u7xh5AO6sipnJ9r4cRWQB2tUjPyIkc6g==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.2.tgz", + "integrity": "sha512-k4LtpgV7NJQOml/10uPU0s4SAXGnowi5qBSjaLWMojNCUICNu7TshqHLAEbkBdAszL5TabfvQ48kK84hyFzjnw==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.2.tgz", + "integrity": "sha512-GRa4IshOdvKY7M/rDpRR3gkiTNp34M0eLTaC1a08gNrh4u488aPhuZOCpkF6+2wl3zAN7L7XIpOFBhnaE3/Q8Q==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/linux-x64": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz", - "integrity": "sha512-xbfUhu/gnvSEg+EGovRc+kjBAkrvtk38RlerAzQxvMzlB4fXpCFCeUAYzJvrnhFtdeyVCDANSjJvOvGYoeKzFA==", + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.2.tgz", + "integrity": "sha512-QInHERlqpTTZ4FRB0fROQWXcYRD64lAoiegezDunLpalZMjcUcld3YzZmVJ2H/Cp0wJRZ8Xtjtj0cEHhYc/uUg==", "cpu": [ "x64" ], @@ -103,6 +360,134 @@ "node": ">=18" } }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.2.tgz", + "integrity": 
"sha512-talAIBoY5M8vHc6EeI2WW9d/CkiO9MQJ0IOWX8hrLhxGbro/vBXJvaQXefW2cP0z0nQVTdQ/eNyGFV1GSKrxfw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.2.tgz", + "integrity": "sha512-voZT9Z+tpOxrvfKFyfDYPc4DO4rk06qamv1a/fkuzHpiVBMOhpjK+vBmWM8J1eiB3OLSMFYNaOaBNLXGChf5tg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.2.tgz", + "integrity": "sha512-dcXYOC6NXOqcykeDlwId9kB6OkPUxOEqU+rkrYVqJbK2hagWOMrsTGsMr8+rW02M+d5Op5NNlgMmjzecaRf7Tg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.2.tgz", + "integrity": "sha512-t/TkWwahkH0Tsgoq1Ju7QfgGhArkGLkF1uYz8nQS/PPFlXbP5YgRpqQR3ARRiC2iXoLTWFxc6DJMSK10dVXluw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.2.tgz", + "integrity": "sha512-cfZH1co2+imVdWCjd+D1gf9NjkchVhhdpgb1q5y6Hcv9TP6Zi9ZG/beI3ig8TvwT9lH9dlxLq5MQBBgwuj4xvA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.2.tgz", + "integrity": 
"sha512-7Loyjh+D/Nx/sOTzV8vfbB3GJuHdOQyrOryFdZvPHLf42Tk9ivBU5Aedi7iyX+x6rbn2Mh68T4qq1SDqJBQO5Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.2.tgz", + "integrity": "sha512-WRJgsz9un0nqZJ4MfhabxaD9Ft8KioqU3JMinOTvobbX6MOSUigSBlogP8QB3uxpJDsFS6yN+3FDBdqE5lg9kg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.2.tgz", + "integrity": "sha512-kM3HKb16VIXZyIeVrM1ygYmZBKybX8N4p754bw390wGO3Tf2j4L2/WYL+4suWujpgf6GBYs3jv7TyUivdd05JA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -296,18 +681,18 @@ } }, "node_modules/@polkadot-api/cli": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/@polkadot-api/cli/-/cli-0.11.2.tgz", - "integrity": "sha512-W5ycHU/RGmKzs9Myzs2hv2eR555Z+/5Pd+Iguu2WEShC2Kxq1bxEc+XPCSSEF24apMGdlywBVRrh1D5LfpleFA==", + "version": "0.11.9", + "resolved": "https://registry.npmjs.org/@polkadot-api/cli/-/cli-0.11.9.tgz", + "integrity": "sha512-5Qt+YRf/kOCZGiFoWzgyxoZYA9OpN28AFE4jQ4nZI33lty8oH4FR62IF2iLF+KdafhgF9k9l1Kj24zuBFH3Vrw==", "license": "MIT", "dependencies": { "@commander-js/extra-typings": "^13.1.0", - "@polkadot-api/codegen": "0.13.1", + "@polkadot-api/codegen": "0.13.3", "@polkadot-api/ink-contracts": "0.2.6", "@polkadot-api/json-rpc-provider": "0.0.4", - "@polkadot-api/known-chains": "0.7.1", - "@polkadot-api/metadata-compatibility": "0.1.16", - 
"@polkadot-api/observable-client": "0.8.2", + "@polkadot-api/known-chains": "0.7.3", + "@polkadot-api/metadata-compatibility": "0.2.0", + "@polkadot-api/observable-client": "0.8.6", "@polkadot-api/polkadot-sdk-compat": "2.3.2", "@polkadot-api/sm-provider": "0.1.7", "@polkadot-api/smoldot": "0.3.8", @@ -316,7 +701,7 @@ "@polkadot-api/utils": "0.1.2", "@polkadot-api/wasm-executor": "^0.1.2", "@polkadot-api/ws-provider": "0.4.0", - "@types/node": "^22.13.9", + "@types/node": "^22.14.0", "commander": "^13.1.0", "execa": "^9.5.2", "fs.promises.exists": "^1.1.4", @@ -325,7 +710,7 @@ "rxjs": "^7.8.2", "tsc-prog": "^2.3.0", "tsup": "^8.4.0", - "typescript": "^5.8.2", + "typescript": "^5.8.3", "write-package": "^7.1.0" }, "bin": { @@ -333,200 +718,19 @@ "polkadot-api": "dist/main.js" } }, - "node_modules/@polkadot-api/cli/node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.35.0.tgz", - "integrity": "sha512-Pim1T8rXOri+0HmV4CdKSGrqcBWX0d1HoPnQ0uw0bdp1aP5SdQVNBy8LjYncvnLgu3fnnCt17xjWGd4cqh8/hA==", - "cpu": [ - "x64" - ], - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@polkadot-api/cli/node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.35.0.tgz", - "integrity": "sha512-QysqXzYiDvQWfUiTm8XmJNO2zm9yC9P/2Gkrwg2dH9cxotQzunBHYr6jk4SujCTqnfGxduOmQcI7c2ryuW8XVg==", - "cpu": [ - "x64" - ], - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@polkadot-api/cli/node_modules/@types/node": { - "version": "22.13.10", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.10.tgz", - "integrity": "sha512-I6LPUvlRH+O6VRUqYOcMudhaIdUVWfsjnZavnsraHvpBwaEyMN29ry+0UVJhImYL16xsscu0aske3yA+uPOWfw==", - "license": "MIT", - "dependencies": { - "undici-types": "~6.20.0" - } - }, - 
"node_modules/@polkadot-api/cli/node_modules/chokidar": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", - "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", - "license": "MIT", - "dependencies": { - "readdirp": "^4.0.1" - }, - "engines": { - "node": ">= 14.16.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@polkadot-api/cli/node_modules/readdirp": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", - "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", - "license": "MIT", - "engines": { - "node": ">= 14.18.0" - }, - "funding": { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@polkadot-api/cli/node_modules/rollup": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.35.0.tgz", - "integrity": "sha512-kg6oI4g+vc41vePJyO6dHt/yl0Rz3Thv0kJeVQ3D1kS3E5XSuKbPc29G4IpT/Kv1KQwgHVcN+HtyS+HYLNSvQg==", - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.6" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.35.0", - "@rollup/rollup-android-arm64": "4.35.0", - "@rollup/rollup-darwin-arm64": "4.35.0", - "@rollup/rollup-darwin-x64": "4.35.0", - "@rollup/rollup-freebsd-arm64": "4.35.0", - "@rollup/rollup-freebsd-x64": "4.35.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.35.0", - "@rollup/rollup-linux-arm-musleabihf": "4.35.0", - "@rollup/rollup-linux-arm64-gnu": "4.35.0", - "@rollup/rollup-linux-arm64-musl": "4.35.0", - "@rollup/rollup-linux-loongarch64-gnu": "4.35.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.35.0", - "@rollup/rollup-linux-riscv64-gnu": "4.35.0", - "@rollup/rollup-linux-s390x-gnu": "4.35.0", - 
"@rollup/rollup-linux-x64-gnu": "4.35.0", - "@rollup/rollup-linux-x64-musl": "4.35.0", - "@rollup/rollup-win32-arm64-msvc": "4.35.0", - "@rollup/rollup-win32-ia32-msvc": "4.35.0", - "@rollup/rollup-win32-x64-msvc": "4.35.0", - "fsevents": "~2.3.2" - } - }, - "node_modules/@polkadot-api/cli/node_modules/tsc-prog": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/tsc-prog/-/tsc-prog-2.3.0.tgz", - "integrity": "sha512-ycET2d75EgcX7y8EmG4KiZkLAwUzbY4xRhA6NU0uVbHkY4ZjrAAuzTMxXI85kOwATqPnBI5C/7y7rlpY0xdqHA==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "peerDependencies": { - "typescript": ">=4" - } - }, - "node_modules/@polkadot-api/cli/node_modules/tsup": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/tsup/-/tsup-8.4.0.tgz", - "integrity": "sha512-b+eZbPCjz10fRryaAA7C8xlIHnf8VnsaRqydheLIqwG/Mcpfk8Z5zp3HayX7GaTygkigHl5cBUs+IhcySiIexQ==", - "license": "MIT", - "dependencies": { - "bundle-require": "^5.1.0", - "cac": "^6.7.14", - "chokidar": "^4.0.3", - "consola": "^3.4.0", - "debug": "^4.4.0", - "esbuild": "^0.25.0", - "joycon": "^3.1.1", - "picocolors": "^1.1.1", - "postcss-load-config": "^6.0.1", - "resolve-from": "^5.0.0", - "rollup": "^4.34.8", - "source-map": "0.8.0-beta.0", - "sucrase": "^3.35.0", - "tinyexec": "^0.3.2", - "tinyglobby": "^0.2.11", - "tree-kill": "^1.2.2" - }, - "bin": { - "tsup": "dist/cli-default.js", - "tsup-node": "dist/cli-node.js" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@microsoft/api-extractor": "^7.36.0", - "@swc/core": "^1", - "postcss": "^8.4.12", - "typescript": ">=4.5.0" - }, - "peerDependenciesMeta": { - "@microsoft/api-extractor": { - "optional": true - }, - "@swc/core": { - "optional": true - }, - "postcss": { - "optional": true - }, - "typescript": { - "optional": true - } - } - }, - "node_modules/@polkadot-api/cli/node_modules/typescript": { - "version": "5.8.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", - 
"integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, "node_modules/@polkadot-api/codegen": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/@polkadot-api/codegen/-/codegen-0.13.1.tgz", - "integrity": "sha512-pqJI2gFrk5rfaO8IyGw59DvJH2PyvWXx/dTxev6VsX3BLvYVdb/vISQjdrHpsCk4BHqmWrlLnhw1/jFVotf4ew==", + "version": "0.13.3", + "resolved": "https://registry.npmjs.org/@polkadot-api/codegen/-/codegen-0.13.3.tgz", + "integrity": "sha512-+8mp9k5L9myFSLv6Ad5r63JSIeq80/tKbk67rczDq6Co0PlJHqxult+wZHohHuyJSdtu8dHW9JQktTtM2RZT1w==", "license": "MIT", "dependencies": { "@polkadot-api/ink-contracts": "0.2.6", "@polkadot-api/metadata-builders": "0.10.2", - "@polkadot-api/metadata-compatibility": "0.1.16", + "@polkadot-api/metadata-compatibility": "0.2.0", "@polkadot-api/substrate-bindings": "0.11.1", "@polkadot-api/utils": "0.1.2" } }, - "node_modules/@polkadot-api/descriptors": { - "resolved": ".papi/descriptors", - "link": true - }, "node_modules/@polkadot-api/ink-contracts": { "version": "0.2.6", "resolved": "https://registry.npmjs.org/@polkadot-api/ink-contracts/-/ink-contracts-0.2.6.tgz", @@ -551,9 +755,9 @@ "license": "MIT" }, "node_modules/@polkadot-api/known-chains": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/@polkadot-api/known-chains/-/known-chains-0.7.1.tgz", - "integrity": "sha512-65hwgOrS0dFi4J6LQy043fZoBv29ctvAO91gQjSyhQdTionpoNVEizUWZwJj2qx3U4+sSovQXP+s71QBpv8NZA==", + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@polkadot-api/known-chains/-/known-chains-0.7.3.tgz", + "integrity": "sha512-yBRVbOLn0e36+EGWE2/hX8mhTKvfdZtbk2VCgTM9djkz28eDFfiDjEl6biQA8Q0Kd7t3iRzoNbBzpzyBwTMXUg==", "license": "MIT" }, "node_modules/@polkadot-api/logs-provider": { @@ -576,9 +780,9 @@ } }, "node_modules/@polkadot-api/metadata-compatibility": { 
- "version": "0.1.16", - "resolved": "https://registry.npmjs.org/@polkadot-api/metadata-compatibility/-/metadata-compatibility-0.1.16.tgz", - "integrity": "sha512-30qCfWUtxdaCy/9vwnBf4CGrtZ4KGSZDGz+d3fBSx7S2o5ezFmauld4NCKNo4SQiS1S0I7eixV2/JlkMhGqxBQ==", + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@polkadot-api/metadata-compatibility/-/metadata-compatibility-0.2.0.tgz", + "integrity": "sha512-ZvHj4KDQy/JFqV51UN6Gk5xnG0qt/BUS4kjYosLWT9y6p5bHg/4ge7QF5lMloInQqV3Rul9NQo4cKUz3SlSQMQ==", "license": "MIT", "dependencies": { "@polkadot-api/metadata-builders": "0.10.2", @@ -586,9 +790,9 @@ } }, "node_modules/@polkadot-api/observable-client": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/@polkadot-api/observable-client/-/observable-client-0.8.2.tgz", - "integrity": "sha512-yMjKKOcToHYtOU+V1xWE7D0Ddhqn7uNPj3Zv1kHR+AhhHR4bEbG1S5CtUAyQOgJDQxaAHDRNujXNxsLcT9nqmw==", + "version": "0.8.6", + "resolved": "https://registry.npmjs.org/@polkadot-api/observable-client/-/observable-client-0.8.6.tgz", + "integrity": "sha512-ci5HC8TYjGxoTG/QM+LLuGrfIsn+dtR7BBQz483c/ML8K/Hxl9v+evgZzPi9xNMwZ25mytn9lhA5dovYSEauSA==", "license": "MIT", "dependencies": { "@polkadot-api/metadata-builders": "0.10.2", @@ -1280,7 +1484,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -1294,7 +1497,6 @@ "cpu": [ "x64" ], - "dev": true, "license": "MIT", "optional": true, "os": [ @@ -1565,12 +1767,12 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.5", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.5.tgz", - "integrity": "sha512-+lTU0PxZXn0Dr1NBtC7Y8cR21AJr87dLLU953CWA6pMxxv/UDc7jYAY90upcrie1nRcD6XNG5HOYEDtgW5TxAg==", + "version": "22.14.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.14.1.tgz", + "integrity": "sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw==", "license": "MIT", "dependencies": { - "undici-types": "~6.20.0" + 
"undici-types": "~6.21.0" } }, "node_modules/@types/normalize-package-data": { @@ -2027,9 +2229,9 @@ } }, "node_modules/consola": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.0.tgz", - "integrity": "sha512-EiPU8G6dQG0GFHNR8ljnZFki/8a+cQwEQ+7wpxdChl02Q8HXlwEZWD5lqAF8vC2sEC3Tehr8hy7vErz88LHyUA==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", "license": "MIT", "engines": { "node": "^14.18.0 || >=16.10.0" @@ -2247,9 +2449,9 @@ } }, "node_modules/esbuild": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.1.tgz", - "integrity": "sha512-BGO5LtrGC7vxnqucAe/rmvKdJllfGaYWdyABvyMoXQlfYMb2bbRuReWR5tEGE//4LcNJj9XrkovTqNYRFZHAMQ==", + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.2.tgz", + "integrity": "sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==", "hasInstallScript": true, "license": "MIT", "bin": { @@ -2259,31 +2461,31 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.1", - "@esbuild/android-arm": "0.25.1", - "@esbuild/android-arm64": "0.25.1", - "@esbuild/android-x64": "0.25.1", - "@esbuild/darwin-arm64": "0.25.1", - "@esbuild/darwin-x64": "0.25.1", - "@esbuild/freebsd-arm64": "0.25.1", - "@esbuild/freebsd-x64": "0.25.1", - "@esbuild/linux-arm": "0.25.1", - "@esbuild/linux-arm64": "0.25.1", - "@esbuild/linux-ia32": "0.25.1", - "@esbuild/linux-loong64": "0.25.1", - "@esbuild/linux-mips64el": "0.25.1", - "@esbuild/linux-ppc64": "0.25.1", - "@esbuild/linux-riscv64": "0.25.1", - "@esbuild/linux-s390x": "0.25.1", - "@esbuild/linux-x64": "0.25.1", - "@esbuild/netbsd-arm64": "0.25.1", - "@esbuild/netbsd-x64": "0.25.1", - "@esbuild/openbsd-arm64": "0.25.1", - "@esbuild/openbsd-x64": "0.25.1", - "@esbuild/sunos-x64": 
"0.25.1", - "@esbuild/win32-arm64": "0.25.1", - "@esbuild/win32-ia32": "0.25.1", - "@esbuild/win32-x64": "0.25.1" + "@esbuild/aix-ppc64": "0.25.2", + "@esbuild/android-arm": "0.25.2", + "@esbuild/android-arm64": "0.25.2", + "@esbuild/android-x64": "0.25.2", + "@esbuild/darwin-arm64": "0.25.2", + "@esbuild/darwin-x64": "0.25.2", + "@esbuild/freebsd-arm64": "0.25.2", + "@esbuild/freebsd-x64": "0.25.2", + "@esbuild/linux-arm": "0.25.2", + "@esbuild/linux-arm64": "0.25.2", + "@esbuild/linux-ia32": "0.25.2", + "@esbuild/linux-loong64": "0.25.2", + "@esbuild/linux-mips64el": "0.25.2", + "@esbuild/linux-ppc64": "0.25.2", + "@esbuild/linux-riscv64": "0.25.2", + "@esbuild/linux-s390x": "0.25.2", + "@esbuild/linux-x64": "0.25.2", + "@esbuild/netbsd-arm64": "0.25.2", + "@esbuild/netbsd-x64": "0.25.2", + "@esbuild/openbsd-arm64": "0.25.2", + "@esbuild/openbsd-x64": "0.25.2", + "@esbuild/sunos-x64": "0.25.2", + "@esbuild/win32-arm64": "0.25.2", + "@esbuild/win32-ia32": "0.25.2", + "@esbuild/win32-x64": "0.25.2" } }, "node_modules/escalade": { @@ -2793,9 +2995,9 @@ } }, "node_modules/human-signals": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.0.tgz", - "integrity": "sha512-/1/GPCpDUCCYwlERiYjxoczfP0zfvZMU/OWgQPMya9AbAE24vseigFdhAMObpc8Q4lc/kjutPfUddDYyAmejnA==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz", + "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", "license": "Apache-2.0", "engines": { "node": ">=18.18.0" @@ -2811,9 +3013,9 @@ } }, "node_modules/index-to-position": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/index-to-position/-/index-to-position-0.1.2.tgz", - "integrity": "sha512-MWDKS3AS1bGCHLBA2VLImJz42f7bJh8wQsTGCzI3j519/CASStoDONUBVz2I/VID0MpiX3SGSnbOD2xUalbE5g==", + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/index-to-position/-/index-to-position-1.1.0.tgz", + "integrity": "sha512-XPdx9Dq4t9Qk1mTMbWONJqU7boCoumEH7fRET37HX5+khDUl3J2W6PdALxhILYlIYx2amlwYcRPp28p0tSiojg==", "license": "MIT", "engines": { "node": ">=18" @@ -3508,18 +3710,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/npm-run-path/node_modules/unicorn-magic": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", - "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -3765,14 +3955,14 @@ "license": "BlueOak-1.0.0" }, "node_modules/parse-json": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.1.0.tgz", - "integrity": "sha512-rum1bPifK5SSar35Z6EKZuYPJx85pkNaFrxBK3mwdfSJ1/WKbYrjoW/zTPSjRRamfmVX1ACBIdFAO0VRErW/EA==", + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "index-to-position": "^0.1.2", - "type-fest": "^4.7.1" + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" }, "engines": { "node": ">=18" @@ -3856,28 +4046,28 @@ } }, "node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": 
"sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/polkadot-api": { - "version": "1.9.5", - "resolved": "https://registry.npmjs.org/polkadot-api/-/polkadot-api-1.9.5.tgz", - "integrity": "sha512-wHe2TFqBVbiAE9CDLZA/xMbMCfOtch6++kSmDIWY7i9MmcWJwZhDHpHlfvRUsVgI/VL36QPEjBH+Kjt3KNLLhw==", + "version": "1.9.12", + "resolved": "https://registry.npmjs.org/polkadot-api/-/polkadot-api-1.9.12.tgz", + "integrity": "sha512-gYhpef5YnLEPZ3Uxeha5sHIIejINONSGBXTgFyEWsYi4y2DEUlv2ISlNZ9/0AGG6b6ZFDd56mLop/Fohl8vA4Q==", "license": "MIT", "dependencies": { - "@polkadot-api/cli": "0.11.2", + "@polkadot-api/cli": "0.11.9", "@polkadot-api/ink-contracts": "0.2.6", "@polkadot-api/json-rpc-provider": "0.0.4", - "@polkadot-api/known-chains": "0.7.1", + "@polkadot-api/known-chains": "0.7.3", "@polkadot-api/logs-provider": "0.0.6", "@polkadot-api/metadata-builders": "0.10.2", - "@polkadot-api/metadata-compatibility": "0.1.16", - "@polkadot-api/observable-client": "0.8.2", + "@polkadot-api/metadata-compatibility": "0.2.0", + "@polkadot-api/observable-client": "0.8.6", "@polkadot-api/pjs-signer": "0.6.5", "@polkadot-api/polkadot-sdk-compat": "2.3.2", "@polkadot-api/polkadot-signer": "0.1.6", @@ -4056,6 +4246,18 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/read-pkg/node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -4106,7 +4308,6 @@ "version": "4.34.8", "resolved": 
"https://registry.npmjs.org/rollup/-/rollup-4.34.8.tgz", "integrity": "sha512-489gTVMzAYdiZHFVA/ig/iYFllCcWFHMvUHI1rpFmkoUtRlQxqh6/yiNqnYibjMZ2b/+FUQwldG+aLsEt6bglQ==", - "dev": true, "license": "MIT", "dependencies": { "@types/estree": "1.0.6" @@ -4630,16 +4831,107 @@ } } }, + "node_modules/tsc-prog": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tsc-prog/-/tsc-prog-2.3.0.tgz", + "integrity": "sha512-ycET2d75EgcX7y8EmG4KiZkLAwUzbY4xRhA6NU0uVbHkY4ZjrAAuzTMxXI85kOwATqPnBI5C/7y7rlpY0xdqHA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "typescript": ">=4" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, + "node_modules/tsup": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/tsup/-/tsup-8.4.0.tgz", + "integrity": "sha512-b+eZbPCjz10fRryaAA7C8xlIHnf8VnsaRqydheLIqwG/Mcpfk8Z5zp3HayX7GaTygkigHl5cBUs+IhcySiIexQ==", + "license": "MIT", + "dependencies": { + "bundle-require": "^5.1.0", + "cac": "^6.7.14", + "chokidar": "^4.0.3", + "consola": "^3.4.0", + "debug": "^4.4.0", + "esbuild": "^0.25.0", + "joycon": "^3.1.1", + "picocolors": "^1.1.1", + "postcss-load-config": "^6.0.1", + "resolve-from": "^5.0.0", + "rollup": "^4.34.8", + "source-map": "0.8.0-beta.0", + "sucrase": "^3.35.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.11", + "tree-kill": "^1.2.2" + }, + "bin": { + "tsup": "dist/cli-default.js", + "tsup-node": "dist/cli-node.js" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@microsoft/api-extractor": "^7.36.0", + "@swc/core": "^1", + "postcss": "^8.4.12", + "typescript": ">=4.5.0" + }, + "peerDependenciesMeta": { + "@microsoft/api-extractor": { + "optional": true + }, + "@swc/core": { + "optional": true + }, + "postcss": { + "optional": true + }, + "typescript": { + 
"optional": true + } + } + }, + "node_modules/tsup/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/tsup/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/type-fest": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.35.0.tgz", - "integrity": "sha512-2/AwEFQDFEy30iOLjrvHDIH7e4HEWH+f1Yl1bI5XMqzuoCUqwYCdxachgsgv0og/JdVZUhbfjcJAoHj5L1753A==", + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.40.0.tgz", + "integrity": "sha512-ABHZ2/tS2JkvH1PEjxFDTUWC8dB5OsIGZP4IFLhR293GqT5Y5qB1WwL2kMPYhQW9DVgVD8Hd7I8gjwPIf5GFkw==", "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=16" @@ -4649,10 +4941,9 @@ } }, "node_modules/typescript": { - "version": "5.7.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", - "integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", - "devOptional": true, + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", @@ -4663,16 +4954,16 @@ } }, 
"node_modules/undici-types": { - "version": "6.20.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", - "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", "license": "MIT" }, "node_modules/unicorn-magic": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", - "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", - "license": "MIT", + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "license": "MIT", "engines": { "node": ">=18" }, @@ -4822,6 +5113,278 @@ } } }, + "node_modules/vite/node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": 
"0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, 
+ "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/vite/node_modules/@esbuild/linux-x64": { "version": "0.21.5", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", @@ -4839,6 +5402,108 @@ "node": ">=12" } }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": 
{ + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, "node_modules/vite/node_modules/esbuild": { "version": "0.21.5", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", diff --git a/evm-tests/package.json b/evm-tests/package.json index 45f03c0b49..0e90cdb976 100644 --- a/evm-tests/package.json +++ b/evm-tests/package.json @@ -15,6 +15,7 @@ "ethers": "^6.13.5", "mocha": "^11.1.0", "polkadot-api": "^1.9.5", + "scale-ts": "^1.6.1", "viem": "2.23.4" }, "devDependencies": { diff --git a/evm-tests/src/contracts/staking.ts b/evm-tests/src/contracts/staking.ts index af4422ca96..0ba37c5a94 100644 --- a/evm-tests/src/contracts/staking.ts +++ b/evm-tests/src/contracts/staking.ts @@ -287,5 +287,71 @@ export const IStakingV2ABI = [ "outputs": [], "stateMutability": "nonpayable", "type": "function" - } + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + 
"internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "addStakeLimit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "removeStakeLimit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, ]; \ No newline at end of file diff --git a/evm-tests/src/contracts/subnet.ts b/evm-tests/src/contracts/subnet.ts index 9b6fe00596..eacdaf3aca 100644 --- a/evm-tests/src/contracts/subnet.ts +++ b/evm-tests/src/contracts/subnet.ts @@ -572,6 +572,43 @@ export const ISubnetABI = [ stateMutability: "view", type: "function", }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getYuma3Enabled", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "yuma3Enabled", + type: "bool", + }, + ], + name: "setYuma3Enabled", + outputs: [], + stateMutability: "payable", + type: "function", + }, { inputs: [ { @@ -886,4 +923,4 @@ export const ISubnetABI = [ stateMutability: "payable", type: "function" }, -]; \ No newline at end of file +]; diff --git a/evm-tests/src/contracts/uidLookup.ts b/evm-tests/src/contracts/uidLookup.ts new file mode 100644 index 0000000000..06c68805e6 --- /dev/null +++ b/evm-tests/src/contracts/uidLookup.ts @@ -0,0 +1,45 @@ +export const IUID_LOOKUP_ADDRESS = "0x0000000000000000000000000000000000000806"; + +export 
const IUIDLookupABI = [ + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16" + }, + { + internalType: "address", + name: "evm_address", + type: "address" + }, + { + internalType: "uint16", + name: "limit", + type: "uint16" + } + ], + name: "uidLookup", + outputs: [ + { + components: [ + { + internalType: "uint16", + name: "uid", + type: "uint16" + }, + { + internalType: "uint64", + name: "block_associated", + type: "uint64" + } + ], + internalType: "struct LookupItem[]", + name: "", + type: "tuple[]" + } + ], + stateMutability: "view", + type: "function" + } +]; diff --git a/evm-tests/src/substrate.ts b/evm-tests/src/substrate.ts index d86479450f..bd6d725d48 100644 --- a/evm-tests/src/substrate.ts +++ b/evm-tests/src/substrate.ts @@ -173,6 +173,9 @@ export async function getTransactionWatchPromise(tx: Transaction<{}, string, str if (value.type === "finalized") { console.log("Transaction is finalized in block:", value.txHash); subscription.unsubscribe(); + if (!value.ok) { + console.log("Transaction threw an error:", value.dispatchError) + } // Resolve the promise when the transaction is finalized resolve(); diff --git a/evm-tests/src/utils.ts b/evm-tests/src/utils.ts index 36e922b49e..1ba191d833 100644 --- a/evm-tests/src/utils.ts +++ b/evm-tests/src/utils.ts @@ -2,6 +2,8 @@ import { defineChain, http, publicActions, createPublicClient } from "viem" import { privateKeyToAccount, generatePrivateKey } from 'viem/accounts' import { ethers } from "ethers" import { ETH_LOCAL_URL } from "./config" +import { FixedSizeBinary } from "polkadot-api"; +import { hexToU8a } from "@polkadot/util"; export type ClientUrlType = 'http://localhost:9944'; @@ -52,4 +54,16 @@ export function generateRandomEthersWallet() { const wallet = new ethers.Wallet(account.privateKey, provider); return wallet; -} \ No newline at end of file +} + +export function convertToFixedSizeBinary(hexString: string, size: T): FixedSizeBinary { + // Convert hex string to a byte 
array + const byteArray = hexToU8a(hexString); + + // Ensure the byte array is exactly the specified size + if (byteArray.length !== size) { + throw new Error(`The provided string "${hexString}" does not convert to exactly ${size} bytes.`); + } + + return new FixedSizeBinary(byteArray); +} diff --git a/evm-tests/test/staking.precompile.limit.test.ts b/evm-tests/test/staking.precompile.limit.test.ts new file mode 100644 index 0000000000..759aaecce2 --- /dev/null +++ b/evm-tests/test/staking.precompile.limit.test.ts @@ -0,0 +1,113 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate"; +import { devnet } from "@polkadot-api/descriptors"; +import { TypedApi } from "polkadot-api"; +import { + convertH160ToSS58, + convertPublicKeyToSs58, +} from "../src/address-utils"; +import { tao, raoToEth } from "../src/balance-math"; +import { + addNewSubnetwork, + addStake, + forceSetBalanceToEthAddress, + forceSetBalanceToSs58Address, + startCall, +} from "../src/subtensor"; +import { ethers } from "ethers"; +import { generateRandomEthersWallet } from "../src/utils"; +import { ISTAKING_V2_ADDRESS, IStakingV2ABI } from "../src/contracts/staking"; +import { log } from "console"; + +describe("Test staking precompile add remove limit methods", () => { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const wallet1 = generateRandomEthersWallet(); + + let api: TypedApi; + + before(async () => { + api = await getDevnetApi(); + await forceSetBalanceToSs58Address( + api, + convertPublicKeyToSs58(hotkey.publicKey), + ); + await forceSetBalanceToSs58Address( + api, + convertPublicKeyToSs58(coldkey.publicKey), + ); + await forceSetBalanceToEthAddress(api, wallet1.address); + await addNewSubnetwork(api, hotkey, coldkey); + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + await startCall(api, netuid, coldkey); + console.log("will test in subnet: ", netuid); + 
}); + + it("Staker add limit", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + let ss58Address = convertH160ToSS58(wallet1.address); + + const alpha = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1, + ); + + const tx = await contract.addStakeLimit( + hotkey.publicKey, + tao(2000), + tao(1000), + true, + netuid, + ); + await tx.wait(); + + const alphaAfterAddStake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + assert.ok(alphaAfterAddStake > alpha); + }); + + it("Staker remove limit", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + let ss58Address = convertH160ToSS58(wallet1.address); + + const alpha = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1, + ); + + const tx = await contract.removeStakeLimit( + hotkey.publicKey, + tao(100), + tao(1), + true, + netuid, + ); + await tx.wait(); + + const alphaAfterRemoveStake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid, + ); + + assert.ok(alphaAfterRemoveStake < alpha); + }); +}); diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index e7b5a1ee0d..57efd64f77 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -79,7 +79,7 @@ describe("Test the Subnet precompile contract", () => { }) - // minDifficulty hyperparameter + // minDifficulty hyperparameter // // disabled: only by sudo // @@ -471,6 +471,26 @@ 
describe("Test the Subnet precompile contract", () => { assert.equal(valueFromContract, onchainValue); }) + it("Can set yuma3Enabled hyperparameter", async () => + { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + const newValue = true; + const tx = await contract.setYuma3Enabled(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.Yuma3On.getValue(netuid) + + let valueFromContract = Boolean( + await contract.getYuma3Enabled(netuid) + ); + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + }) + + it("Can set alphaValues parameter", async () => { const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); @@ -509,4 +529,4 @@ describe("Test the Subnet precompile contract", () => { assert.equal(valueFromContract, newValue) assert.equal(valueFromContract, onchainValue); }) -}) \ No newline at end of file +}) diff --git a/evm-tests/test/uid.precompile.lookup.test.ts b/evm-tests/test/uid.precompile.lookup.test.ts new file mode 100644 index 0000000000..6e702d612e --- /dev/null +++ b/evm-tests/test/uid.precompile.lookup.test.ts @@ -0,0 +1,90 @@ +import * as assert from "assert"; + +import { getAliceSigner, getDevnetApi, waitForTransactionCompletion, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { convertToFixedSizeBinary, generateRandomEthersWallet, getPublicClient } from "../src/utils"; +import { ETH_LOCAL_URL } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { hexToU8a } from "@polkadot/util"; +import { u64 } from "scale-ts"; +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { toViemAddress, convertPublicKeyToSs58 } from 
"../src/address-utils" +import { IUIDLookupABI, IUID_LOOKUP_ADDRESS } from "../src/contracts/uidLookup" +import { keccak256 } from 'ethers'; +import { addNewSubnetwork, forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; + +describe("Test the UID Lookup precompile", () => { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const evmWallet = generateRandomEthersWallet(); + let publicClient: PublicClient; + + let api: TypedApi + + let alice: PolkadotSigner; + + let uid: number; + let blockNumber: number; + let netuid: number; + let blockNumberAssociated: bigint; + + before(async () => { + publicClient = await getPublicClient(ETH_LOCAL_URL) + api = await getDevnetApi() + alice = await getAliceSigner(); + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) + + const maybeUid = await api.query.SubtensorModule.Uids.getValue(netuid, convertPublicKeyToSs58(hotkey.publicKey)) + + if (maybeUid === undefined) { + throw new Error("UID should be defined") + } + uid = maybeUid + + // Associate EVM key + blockNumber = await api.query.System.Number.getValue(); + const blockNumberBytes = u64.enc(BigInt(blockNumber)); + const blockNumberHash = hexToU8a(keccak256(blockNumberBytes)); + const concatenatedArray = new Uint8Array([...hotkey.publicKey, ...blockNumberHash]); + const signature = await evmWallet.signMessage(concatenatedArray); + const associateEvmKeyTx = api.tx.SubtensorModule.associate_evm_key({ + netuid: netuid, + hotkey: convertPublicKeyToSs58(hotkey.publicKey), + evm_key: convertToFixedSizeBinary(evmWallet.address, 20), + block_number: BigInt(blockNumber), + signature: convertToFixedSizeBinary(signature, 65) + }); + 
const signer = getSignerFromKeypair(coldkey); + await waitForTransactionCompletion(api, associateEvmKeyTx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + const storedEvmKey = await api.query.SubtensorModule.AssociatedEvmAddress.getValue(netuid, uid) + assert.notEqual(storedEvmKey, undefined, "storedEvmKey should be defined") + if (storedEvmKey !== undefined) { + assert.equal(storedEvmKey[0].asHex(), convertToFixedSizeBinary(evmWallet.address, 20).asHex()) + blockNumberAssociated = storedEvmKey[1] + } + }) + + it("UID lookup via precompile contract works correctly", async () => { + // Get UID for the EVM address + const uidArray = await publicClient.readContract({ + abi: IUIDLookupABI, + address: toViemAddress(IUID_LOOKUP_ADDRESS), + functionName: "uidLookup", + args: [netuid, evmWallet.address, 1024] + }) + + assert.notEqual(uidArray, undefined, "UID should be defined") + assert.ok(Array.isArray(uidArray), `UID should be an array, got ${typeof uidArray}`) + assert.ok(uidArray.length > 0, "UID array should not be empty") + assert.deepStrictEqual(uidArray[0], { uid: uid, block_associated: blockNumberAssociated }) + }) +}); diff --git a/hyperparameters.md b/hyperparameters.md index c8d2ce1106..31d7261608 100644 --- a/hyperparameters.md +++ b/hyperparameters.md @@ -7,6 +7,7 @@ TxRateLimit: u64 = 1; // [1 @ 64,888] ### netuid 1 (text_prompting) ```rust Rho: u16 = 10; +AlphaSigmoidSteepness: u16 = 10.0 Kappa: u16 = 32_767; // 0.5 = 65535/2 MaxAllowedUids: u16 = 1024; Issuance: u64 = 0; @@ -33,6 +34,7 @@ MaxRegistrationsPerBlock: u16 = 1; PruningScore : u16 = u16::MAX; BondsMovingAverage: u64 = 900_000; BondsPenalty: u16 = 0; +BondsResetOn: bool = false; WeightsVersionKey: u64 = 1020; MinDifficulty: u64 = 10_000_000; MaxDifficulty: u64 = u64::MAX / 4; @@ -46,6 +48,7 @@ WeightsSetRateLimit: u64 = 100; ### netuid 3 (causallmnext) ```rust Rho: u16 = 10; +AlphaSigmoidSteepness: u16 = 10.0 Kappa: u16 = 32_767; // 0.5 = 
65535/2 MaxAllowedUids: u16 = 4096; Issuance: u64 = 0; @@ -72,6 +75,7 @@ MaxRegistrationsPerBlock: u16 = 1; PruningScore : u16 = u16::MAX; BondsMovingAverage: u64 = 900_000; BondsPenalty: u16 = 0; +BondsResetOn: bool = false; WeightsVersionKey: u64 = 400; MinDifficulty: u64 = 10_000_000; MaxDifficulty: u64 = u64::MAX / 4; diff --git a/node/Cargo.toml b/node/Cargo.toml index 6cea8f6950..52ccf20de3 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -99,6 +99,7 @@ fc-api = { workspace = true } fc-rpc = { workspace = true } fc-rpc-core = { workspace = true } fp-rpc = { workspace = true } +fc-aura = { workspace = true } fc-mapping-sync = { workspace = true } fp-consensus = { workspace = true } thiserror = { workspace = true } diff --git a/node/src/ethereum.rs b/node/src/ethereum.rs index 158bd84807..c708efd714 100644 --- a/node/src/ethereum.rs +++ b/node/src/ethereum.rs @@ -1,8 +1,9 @@ +use fc_aura::AuraConsensusDataProvider; pub use fc_consensus::FrontierBlockImport; use fc_rpc::{ Debug, DebugApiServer, Eth, EthApiServer, EthConfig, EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, EthPubSubApiServer, EthSigner, EthTask, Net, NetApiServer, Web3, - Web3ApiServer, pending::AuraConsensusDataProvider, + Web3ApiServer, }; pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; /// Frontier DB backend type. 
diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 419e5bf06b..2b41539816 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -2,11 +2,8 @@ // extern crate alloc; -pub use pallet::*; -pub mod weights; -pub use weights::WeightInfo; - use frame_system::pallet_prelude::BlockNumberFor; +pub use pallet::*; // - we could replace it with Vec<(AuthorityId, u64)>, but we would need // `sp_consensus_grandpa` for `AuthorityId` anyway // - we could use a type parameter for `AuthorityId`, but there is @@ -66,9 +63,6 @@ pub mod pallet { /// The maximum number of authorities that the pallet can hold. type MaxAuthorities: Get; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - /// Unit of assets type Balance: Balance; } @@ -83,6 +77,13 @@ pub mod pallet { /// Indicates if the precompile operation is enabled or not. enabled: bool, }, + /// Event emitted when the Yuma3 enable is toggled. + Yuma3EnableToggled { + /// The network identifier. + netuid: u16, + /// Indicates if the Yuma3 enable was enabled or disabled. + enabled: bool, + }, } // Errors inform users that something went wrong. @@ -110,6 +111,8 @@ pub mod pallet { Metagraph, /// Enum for neuron precompile Neuron, + /// Enum for UID lookup precompile + UidLookup, } #[pallet::type_value] @@ -136,7 +139,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Aura pallet to change the authorities. #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::swap_authorities(new_authorities.len() as u32))] + #[pallet::weight(Weight::from_parts(6_265_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)))] pub fn swap_authorities( origin: OriginFor, new_authorities: BoundedVec<::AuthorityId, T::MaxAuthorities>, @@ -155,7 +160,9 @@ pub mod pallet { /// It is only callable by the root account. 
/// The extrinsic will call the Subtensor pallet to set the default take. #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::sudo_set_default_take())] + #[pallet::weight(Weight::from_parts(6_942_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_default_take(origin: OriginFor, default_take: u16) -> DispatchResult { ensure_root(origin)?; pallet_subtensor::Pallet::::set_max_delegate_take(default_take); @@ -179,7 +186,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the serving rate limit. #[pallet::call_index(3)] - #[pallet::weight(::WeightInfo::sudo_set_serving_rate_limit())] + #[pallet::weight(Weight::from_parts(7_815_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_serving_rate_limit( origin: OriginFor, netuid: u16, @@ -199,7 +208,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum difficulty. #[pallet::call_index(4)] - #[pallet::weight(::WeightInfo::sudo_set_min_difficulty())] + #[pallet::weight(Weight::from_parts(19_780_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_difficulty( origin: OriginFor, netuid: u16, @@ -224,7 +235,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum difficulty. 
#[pallet::call_index(5)] - #[pallet::weight(::WeightInfo::sudo_set_max_difficulty())] + #[pallet::weight(Weight::from_parts(20_050_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_difficulty( origin: OriginFor, netuid: u16, @@ -249,7 +262,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the weights version key. #[pallet::call_index(6)] - #[pallet::weight(::WeightInfo::sudo_set_weights_version_key())] + #[pallet::weight(Weight::from_parts(19_990_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_version_key( origin: OriginFor, netuid: u16, @@ -297,7 +312,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the weights set rate limit. #[pallet::call_index(7)] - #[pallet::weight(::WeightInfo::sudo_set_weights_set_rate_limit())] + #[pallet::weight(Weight::from_parts(20_050_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_set_rate_limit( origin: OriginFor, netuid: u16, @@ -325,7 +342,9 @@ pub mod pallet { /// It is only callable by the root account, not changeable by the subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment interval. #[pallet::call_index(8)] - #[pallet::weight(::WeightInfo::sudo_set_adjustment_interval())] + #[pallet::weight(Weight::from_parts(20_010_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_adjustment_interval( origin: OriginFor, netuid: u16, @@ -380,7 +399,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment beta. 
#[pallet::call_index(12)] - #[pallet::weight(::WeightInfo::sudo_set_max_weight_limit())] + #[pallet::weight(Weight::from_parts(19_240_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_weight_limit( origin: OriginFor, netuid: u16, @@ -405,7 +426,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the immunity period. #[pallet::call_index(13)] - #[pallet::weight(::WeightInfo::sudo_set_immunity_period())] + #[pallet::weight(Weight::from_parts(19_380_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_immunity_period( origin: OriginFor, netuid: u16, @@ -430,7 +453,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum allowed weights. #[pallet::call_index(14)] - #[pallet::weight(::WeightInfo::sudo_set_min_allowed_weights())] + #[pallet::weight(Weight::from_parts(19_770_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_weights( origin: OriginFor, netuid: u16, @@ -455,7 +480,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum allowed UIDs for a subnet. #[pallet::call_index(15)] - #[pallet::weight(::WeightInfo::sudo_set_max_allowed_uids())] + #[pallet::weight(Weight::from_parts(23_820_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_uids( origin: OriginFor, netuid: u16, @@ -483,7 +510,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the kappa. 
#[pallet::call_index(16)] - #[pallet::weight(::WeightInfo::sudo_set_kappa())] + #[pallet::weight(Weight::from_parts(19_590_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_kappa(origin: OriginFor, netuid: u16, kappa: u16) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; @@ -500,7 +529,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the rho. #[pallet::call_index(17)] - #[pallet::weight(::WeightInfo::sudo_set_rho())] + #[pallet::weight(Weight::from_parts(16_420_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_rho(origin: OriginFor, netuid: u16, rho: u16) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; @@ -517,7 +548,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the activity cutoff. #[pallet::call_index(18)] - #[pallet::weight(::WeightInfo::sudo_set_activity_cutoff())] + #[pallet::weight(Weight::from_parts(22_600_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_activity_cutoff( origin: OriginFor, netuid: u16, @@ -549,8 +582,8 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the network registration allowed. #[pallet::call_index(19)] #[pallet::weight(( - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + Weight::from_parts(8_696_000, 0) + .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, Pays::No @@ -605,7 +638,9 @@ pub mod pallet { /// It is only callable by the root account. 
/// The extrinsic will call the Subtensor pallet to set the target registrations per interval. #[pallet::call_index(21)] - #[pallet::weight(::WeightInfo::sudo_set_target_registrations_per_interval())] + #[pallet::weight(Weight::from_parts(19_830_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_target_registrations_per_interval( origin: OriginFor, netuid: u16, @@ -633,7 +668,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the minimum burn. #[pallet::call_index(22)] - #[pallet::weight(::WeightInfo::sudo_set_min_burn())] + #[pallet::weight(Weight::from_parts(19_840_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_burn( origin: OriginFor, netuid: u16, @@ -658,7 +695,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum burn. #[pallet::call_index(23)] - #[pallet::weight(::WeightInfo::sudo_set_max_burn())] + #[pallet::weight(Weight::from_parts(19_740_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_burn( origin: OriginFor, netuid: u16, @@ -683,7 +722,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the difficulty. #[pallet::call_index(24)] - #[pallet::weight(::WeightInfo::sudo_set_difficulty())] + #[pallet::weight(Weight::from_parts(20_280_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_difficulty( origin: OriginFor, netuid: u16, @@ -707,7 +748,9 @@ pub mod pallet { /// It is only callable by the root account. 
/// The extrinsic will call the Subtensor pallet to set the maximum allowed validators. #[pallet::call_index(25)] - #[pallet::weight(::WeightInfo::sudo_set_max_allowed_validators())] + #[pallet::weight(Weight::from_parts(25_210_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_validators( origin: OriginFor, netuid: u16, @@ -740,7 +783,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds moving average. #[pallet::call_index(26)] - #[pallet::weight(::WeightInfo::sudo_set_bonds_moving_average())] + #[pallet::weight(Weight::from_parts(20_270_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_moving_average( origin: OriginFor, netuid: u16, @@ -772,7 +817,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds penalty. #[pallet::call_index(60)] - #[pallet::weight(::WeightInfo::sudo_set_bonds_penalty())] + #[pallet::weight(Weight::from_parts(20_030_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_penalty( origin: OriginFor, netuid: u16, @@ -797,7 +844,9 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum registrations per block. #[pallet::call_index(27)] - #[pallet::weight(::WeightInfo::sudo_set_max_registrations_per_block())] + #[pallet::weight(Weight::from_parts(19_680_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_registrations_per_block( origin: OriginFor, netuid: u16, @@ -868,7 +917,9 @@ pub mod pallet { /// It is only callable by the root account. 
/// The extrinsic will call the Subtensor pallet to set the tempo. #[pallet::call_index(30)] - #[pallet::weight(::WeightInfo::sudo_set_tempo())] + #[pallet::weight(Weight::from_parts(19_900_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_tempo(origin: OriginFor, netuid: u16, tempo: u16) -> DispatchResult { ensure_root(origin)?; ensure!( @@ -1086,7 +1137,9 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the value. #[pallet::call_index(49)] - #[pallet::weight(::WeightInfo::sudo_set_commit_reveal_weights_enabled())] + #[pallet::weight(Weight::from_parts(19_480_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_enabled( origin: OriginFor, netuid: u16, @@ -1285,7 +1338,9 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(57)] - #[pallet::weight(::WeightInfo::sudo_set_commit_reveal_weights_interval())] + #[pallet::weight(Weight::from_parts(20_490_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_interval( origin: OriginFor, netuid: u16, @@ -1319,7 +1374,9 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(58)] - #[pallet::weight(::WeightInfo::sudo_set_evm_chain_id())] + #[pallet::weight(Weight::from_parts(27_199_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn sudo_set_evm_chain_id(origin: OriginFor, chain_id: u64) -> DispatchResult { // Ensure the call is made by the root account ensure_root(origin)?; @@ -1344,7 +1401,9 @@ pub mod pallet { /// No change should be signaled while any change is pending. 
Returns an error if a change /// is already pending. #[pallet::call_index(59)] - #[pallet::weight(::WeightInfo::swap_authorities(next_authorities.len() as u32))] + #[pallet::weight(Weight::from_parts(11_550_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)))] pub fn schedule_grandpa_change( origin: OriginFor, // grandpa ID is always the same type, so we don't need to parametrize it via `Config` @@ -1486,6 +1545,63 @@ pub mod pallet { Ok(()) } + /// + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `netuid` - The unique identifier for the subnet. + /// * `steepness` - The new steepness for the alpha sigmoid function. + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. + #[pallet::call_index(68)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_alpha_sigmoid_steepness( + origin: OriginFor, + netuid: u16, + steepness: u16, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_alpha_sigmoid_steepness(netuid, steepness); + + log::debug!( + "AlphaSigmoidSteepnessSet( netuid: {:?}, steepness: {:?} )", + netuid, + steepness + ); + Ok(()) + } + + /// Enables or disables Yuma3 for a given subnet. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `enabled`: A boolean flag to enable or disable Yuma3. + /// + /// # Weight + /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. 
+ #[pallet::call_index(69)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_yuma3_enabled( + origin: OriginFor, + netuid: u16, + enabled: bool, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); + + Self::deposit_event(Event::Yuma3EnableToggled { netuid, enabled }); + log::debug!( + "Yuma3EnableToggled( netuid: {:?}, Enabled: {:?} ) ", + netuid, + enabled + ); + Ok(()) + } + /// Sets or updates the hotkey account associated with the owner of a specific subnet. /// /// This function allows either the root origin or the current subnet owner to set or update diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 99c11b7165..f8b3e6a9b6 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -80,6 +80,7 @@ parameter_types! { pub const TransactionByteFee: Balance = 100; pub const SDebug:u64 = 1; pub const InitialRho: u16 = 30; + pub const InitialAlphaSigmoidSteepness: u16 = 10; pub const InitialKappa: u16 = 32_767; pub const InitialTempo: u16 = 0; pub const SelfOwnership: u64 = 2; @@ -87,6 +88,7 @@ parameter_types! { pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty: u16 = u16::MAX; + pub const InitialBondsResetOn: bool = false; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. @@ -129,9 +131,11 @@ parameter_types! 
{ pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On // pub const InitialHotkeyEmissionTempo: u64 = 1; // (DEPRECATED) // pub const InitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days + pub const InitialColdkeySwapRescheduleDuration: u64 = 24 * 60 * 60 / 12; // 1 day pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialTaoWeight: u64 = u64::MAX/10; // 10% global weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -157,6 +161,7 @@ impl pallet_subtensor::Config for Test { type InitialAdjustmentAlpha = InitialAdjustmentAlpha; type InitialTargetRegistrationsPerInterval = InitialTargetRegistrationsPerInterval; type InitialRho = InitialRho; + type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; @@ -167,6 +172,7 @@ impl pallet_subtensor::Config for Test { type InitialPruningScore = InitialPruningScore; type InitialBondsMovingAverage = InitialBondsMovingAverage; type InitialBondsPenalty = InitialBondsPenalty; + type InitialBondsResetOn = InitialBondsResetOn; type InitialMaxAllowedValidators = InitialMaxAllowedValidators; type InitialDefaultDelegateTake = InitialDefaultDelegateTake; type InitialMinDelegateTake = InitialMinDelegateTake; @@ -195,8 +201,10 @@ impl pallet_subtensor::Config for Test { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; type Preimages = (); type 
InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; @@ -285,7 +293,6 @@ impl crate::Config for Test { type Aura = (); type Grandpa = GrandpaInterfaceImpl; type Balance = Balance; - type WeightInfo = (); } parameter_types! { @@ -311,7 +318,6 @@ impl pallet_scheduler::Config for Test { impl pallet_evm_chain_id::Config for Test {} impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; diff --git a/pallets/admin-utils/src/weights.rs b/pallets/admin-utils/src/weights.rs deleted file mode 100644 index 6ef9523546..0000000000 --- a/pallets/admin-utils/src/weights.rs +++ /dev/null @@ -1,854 +0,0 @@ - -//! Autogenerated weights for `pallet_admin_utils` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `morpheus`, CPU: `AMD EPYC 7513 32-Core Processor` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` - -// Executed Command: -// ./target/release/node-subtensor -// benchmark -// pallet -// --chain=local -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_admin_utils -// --extrinsic=* -// --steps -// 50 -// --repeat -// 20 -// --output=pallets/admin-utils/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_admin_utils`. -pub trait WeightInfo { - fn swap_authorities(a: u32, ) -> Weight; - fn sudo_set_min_delegate_take() -> Weight; - fn sudo_set_default_take() -> Weight; - fn sudo_set_serving_rate_limit() -> Weight; - fn sudo_set_max_difficulty() -> Weight; - fn sudo_set_min_difficulty() -> Weight; - fn sudo_set_weights_set_rate_limit() -> Weight; - fn sudo_set_weights_version_key() -> Weight; - fn sudo_set_bonds_moving_average() -> Weight; - fn sudo_set_bonds_penalty() -> Weight; - fn sudo_set_max_allowed_validators() -> Weight; - fn sudo_set_difficulty() -> Weight; - fn sudo_set_adjustment_interval() -> Weight; - fn sudo_set_target_registrations_per_interval() -> Weight; - fn sudo_set_activity_cutoff() -> Weight; - fn sudo_set_rho() -> Weight; - fn sudo_set_kappa() -> Weight; - fn sudo_set_max_allowed_uids() -> Weight; - fn sudo_set_min_allowed_weights() -> Weight; - fn sudo_set_validator_prune_len() -> Weight; - fn sudo_set_scaling_law_power() -> Weight; - fn sudo_set_immunity_period() -> Weight; - fn sudo_set_max_weight_limit() -> Weight; - fn sudo_set_max_registrations_per_block() -> Weight; - fn sudo_set_max_burn() -> Weight; - fn sudo_set_min_burn() -> Weight; - fn sudo_set_network_registration_allowed() -> Weight; - fn sudo_set_tempo() -> Weight; - fn 
sudo_set_commit_reveal_weights_interval() -> Weight; - fn sudo_set_commit_reveal_weights_enabled() -> Weight; - fn sudo_set_evm_chain_id() -> Weight; - fn schedule_grandpa_change(a: u32) -> Weight; -} - -/// Weights for `pallet_admin_utils` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 32]`. - fn swap_authorities(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. - Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_default_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. 
- Weight::from_parts(27_199_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_serving_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. - Weight::from_parts(28_290_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. 
- Weight::from_parts(46_909_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_set_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. - Weight::from_parts(46_970_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_version_key() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. - Weight::from_parts(47_460_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_moving_average() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. 
- Weight::from_parts(47_510_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsPenalty (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsPenalty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_penalty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_validators() -> Weight { - // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. 
- Weight::from_parts(53_640_000, 8412) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. - Weight::from_parts(47_130_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_adjustment_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. - Weight::from_parts(46_790_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_target_registrations_per_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. 
- Weight::from_parts(47_099_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) - fn sudo_set_activity_cutoff() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. - Weight::from_parts(46_759_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) - fn sudo_set_rho() -> Weight { - // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. - Weight::from_parts(31_820_000, 4281) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) - fn sudo_set_kappa() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. 
- Weight::from_parts(46_440_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_uids() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. - Weight::from_parts(51_149_000, 8301) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_allowed_weights() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. 
- Weight::from_parts(47_390_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. - Weight::from_parts(46_960_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. - Weight::from_parts(46_590_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) - fn sudo_set_immunity_period() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. 
- Weight::from_parts(46_679_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_weight_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. - Weight::from_parts(46_589_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_registrations_per_block() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. 
- Weight::from_parts(46_339_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. - Weight::from_parts(46_109_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) - fn sudo_set_network_registration_allowed() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) - fn sudo_set_tempo() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. 
- Weight::from_parts(45_489_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `3921` - // Minimum execution time: 19_070_000 picoseconds. - Weight::from_parts(19_380_000, 456) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_enabled() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - fn sudo_set_evm_chain_id() -> Weight { - Weight::from_parts(20_200_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - - fn schedule_grandpa_change(_a: u32) -> Weight { - // TODO should be replaced by benchmarked weights - Weight::default() - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 32]`. - fn swap_authorities(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. 
- Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_default_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_serving_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. 
- Weight::from_parts(28_290_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. - Weight::from_parts(46_909_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_set_rate_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. 
- Weight::from_parts(46_970_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) - fn sudo_set_weights_version_key() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. - Weight::from_parts(47_460_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_moving_average() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsPenalty (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsPenalty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_bonds_penalty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. 
- Weight::from_parts(47_510_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_validators() -> Weight { - // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. - Weight::from_parts(53_640_000, 8412) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) - fn sudo_set_difficulty() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. 
- Weight::from_parts(47_130_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_adjustment_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. - Weight::from_parts(46_790_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) - fn sudo_set_target_registrations_per_interval() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. - Weight::from_parts(47_099_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) - fn sudo_set_activity_cutoff() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. 
- Weight::from_parts(46_759_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) - fn sudo_set_rho() -> Weight { - // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. - Weight::from_parts(31_820_000, 4281) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) - fn sudo_set_kappa() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. 
- Weight::from_parts(46_440_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_allowed_uids() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. - Weight::from_parts(51_149_000, 8301) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_allowed_weights() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. 
- Weight::from_parts(47_390_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. - Weight::from_parts(46_960_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. - Weight::from_parts(46_590_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) - fn sudo_set_immunity_period() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. 
- Weight::from_parts(46_679_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_weight_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. - Weight::from_parts(46_589_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_registrations_per_block() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_max_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. 
- Weight::from_parts(46_339_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) - fn sudo_set_min_burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. - Weight::from_parts(46_109_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) - fn sudo_set_network_registration_allowed() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) - fn sudo_set_tempo() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. 
- Weight::from_parts(45_489_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn sudo_set_commit_reveal_weights_interval() -> Weight { - // -- Extrinsic Time -- - // Model: - // Time ~= 19.38 - // µs - // Reads = 1 - // Writes = 1 - // Recorded proof Size = 456 - Weight::from_parts(19_380_000, 456) - .saturating_add(RocksDbWeight::get().reads(1)) - .saturating_add(RocksDbWeight::get().writes(1)) - } - fn sudo_set_commit_reveal_weights_enabled() -> Weight { - // -- Extrinsic Time -- - // Model: - // Time ~= 19.78 - // µs - // Reads = 1 - // Writes = 1 - // Recorded proof Size = 456 - Weight::from_parts(19_780_000, 456) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn sudo_set_evm_chain_id() -> Weight { - Weight::from_parts(20_200_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - fn schedule_grandpa_change(_a: u32) -> Weight { - // TODO should be replaced by benchmarked weights - Weight::default() - } -} diff --git a/pallets/commitments/src/benchmarking.rs b/pallets/commitments/src/benchmarking.rs index 54247bb9d6..e66f2a07e8 100644 --- a/pallets/commitments/src/benchmarking.rs +++ b/pallets/commitments/src/benchmarking.rs @@ -35,7 +35,6 @@ mod benchmarks { #[benchmark] fn set_commitment() { - // The target user let netuid = 1; let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -56,5 +55,15 @@ mod benchmarks { ); } + #[benchmark] + fn set_max_space() { + let new_space: u32 = 1_000; + + #[extrinsic_call] + _(RawOrigin::Root, new_space); + + assert_eq!(MaxSpace::::get(), new_space); + } + //impl_benchmark_test_suite!(Commitments, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 11e1ae76ee..d0d8d14c9b 100644 --- a/pallets/commitments/src/lib.rs +++ 
b/pallets/commitments/src/lib.rs @@ -56,6 +56,9 @@ pub mod pallet { /// Interface to access-limit metadata commitments type CanCommit: CanCommit; + /// Interface to trigger other pallets when metadata is committed + type OnMetadataCommitment: OnMetadataCommitment; + /// The maximum number of additional fields that can be added to a commitment #[pallet::constant] type MaxFields: Get + TypeInfo + 'static; @@ -68,15 +71,11 @@ pub mod pallet { #[pallet::constant] type FieldDeposit: Get>; - /// The rate limit for commitments - #[pallet::constant] - type DefaultRateLimit: Get>; - /// Used to retreive the given subnet's tempo type TempoInterface: GetTempoInterface; } - /// Used to retreive the given subnet's tempo + /// Used to retreive the given subnet's tempo pub trait GetTempoInterface { /// Used to retreive the epoch index for the given subnet. fn get_epoch_index(netuid: u16, cur_block: u64) -> u64; @@ -116,24 +115,12 @@ pub mod pallet { TooManyFieldsInCommitmentInfo, /// Account is not allow to make commitments to the chain AccountNotAllowedCommit, - /// Account is trying to commit data too fast, rate limit exceeded - CommitmentSetRateLimitExceeded, /// Space Limit Exceeded for the current interval SpaceLimitExceeded, /// Indicates that unreserve returned a leftover, which is unexpected. UnexpectedUnreserveLeftover, } - #[pallet::type_value] - /// *DEPRECATED* Default value for commitment rate limit. - pub fn DefaultRateLimit() -> BlockNumberFor { - T::DefaultRateLimit::get() - } - - /// *DEPRECATED* The rate limit for commitments - #[pallet::storage] - pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; - /// Tracks all CommitmentOf that have at least one timelocked field. 
#[pallet::storage] #[pallet::getter(fn timelocked_index)] @@ -164,6 +151,19 @@ pub mod pallet { BlockNumberFor, OptionQuery, >; + + #[pallet::storage] + #[pallet::getter(fn last_bonds_reset)] + pub(super) type LastBondsReset = StorageDoubleMap< + _, + Identity, + u16, + Twox64Concat, + T::AccountId, + BlockNumberFor, + OptionQuery, + >; + #[pallet::storage] #[pallet::getter(fn revealed_commitments)] pub(super) type RevealedCommitments = StorageDoubleMap< @@ -198,7 +198,9 @@ pub mod pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - ::WeightInfo::set_commitment(), + Weight::from_parts(38_000_000, 0) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)), DispatchClass::Operational, Pays::No ))] @@ -207,7 +209,7 @@ pub mod pallet { netuid: u16, info: Box>, ) -> DispatchResult { - let who = ensure_signed(origin)?; + let who = ensure_signed(origin.clone())?; ensure!( T::CanCommit::can_commit(netuid, &who), Error::::AccountNotAllowedCommit @@ -238,6 +240,16 @@ pub mod pallet { usage.used_space = 0; } + // check if ResetBondsFlag is set in the fields + for field in info.fields.iter() { + if let Data::ResetBondsFlag = field { + // track when bonds reset was last triggered + >::insert(netuid, &who, cur_block); + T::OnMetadataCommitment::on_metadata_commitment(netuid, &who); + break; + } + } + let max_allowed = MaxSpace::::get() as u64; ensure!( usage.used_space.saturating_add(required_space) <= max_allowed, @@ -306,23 +318,27 @@ pub mod pallet { Ok(()) } - /// Sudo-set the commitment rate limit + /// *DEPRECATED* Sudo-set the commitment rate limit #[pallet::call_index(1)] #[pallet::weight(( - ::WeightInfo::set_rate_limit(), - DispatchClass::Operational, - Pays::No - ))] - pub fn set_rate_limit(origin: OriginFor, rate_limit_blocks: u32) -> DispatchResult { + Weight::from_parts(3_596_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + 
.saturating_add(T::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::No + ))] + pub fn set_rate_limit(origin: OriginFor, _rate_limit_blocks: u32) -> DispatchResult { ensure_root(origin)?; - RateLimit::::set(rate_limit_blocks.into()); + // RateLimit::::set(rate_limit_blocks.into()); Ok(()) } /// Sudo-set MaxSpace #[pallet::call_index(2)] #[pallet::weight(( - ::WeightInfo::set_rate_limit(), + Weight::from_parts(3_556_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational, Pays::No ))] @@ -359,6 +375,14 @@ impl CanCommit for () { } } +pub trait OnMetadataCommitment { + fn on_metadata_commitment(netuid: u16, account: &AccountId); +} + +impl OnMetadataCommitment for () { + fn on_metadata_commitment(_: u16, _: &A) {} +} + /************************************************************ CallType definition ************************************************************/ diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index c8f6b1e1b2..4e6aa123bd 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -100,8 +100,8 @@ impl pallet_commitments::Config for Test { type CanCommit = TestCanCommit; type FieldDeposit = ConstU64<0>; type InitialDeposit = ConstU64<0>; - type DefaultRateLimit = ConstU64<0>; type TempoInterface = MockTempoInterface; + type OnMetadataCommitment = (); } pub struct MockTempoInterface; @@ -118,7 +118,6 @@ impl pallet_commitments::GetTempoInterface for MockTempoInterface { impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = test_crypto::TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index c9b14d188b..55e406eb53 100644 --- a/pallets/commitments/src/tests.rs 
+++ b/pallets/commitments/src/tests.rs @@ -3,8 +3,8 @@ use sp_std::prelude::*; #[cfg(test)] use crate::{ - CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, - Registration, RevealedCommitments, TimelockedIndex, UsedSpaceOf, + CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, Registration, + RevealedCommitments, TimelockedIndex, UsedSpaceOf, mock::{ Balances, DRAND_QUICKNET_SIG_2000_HEX, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, TestMaxFields, insert_drand_pulse, new_test_ext, produce_ciphertext, @@ -34,6 +34,7 @@ fn manual_data_type_info() { Data::ShaThree256(_) => "ShaThree256".to_string(), Data::Raw(bytes) => format!("Raw{}", bytes.len()), Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), + Data::ResetBondsFlag => "ResetBondsFlag".to_string(), }; if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { let variant = variant @@ -63,6 +64,7 @@ fn manual_data_type_info() { let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes encrypted_len + reveal_round_len } + Data::ResetBondsFlag => 0, }; assert_eq!( encoded.len() as u32 - 1, // Subtract variant byte @@ -89,6 +91,7 @@ fn manual_data_type_info() { Data::Sha256(Default::default()), Data::Keccak256(Default::default()), Data::ShaThree256(Default::default()), + Data::ResetBondsFlag, ]; // Add Raw instances for all possible sizes @@ -150,39 +153,6 @@ fn set_commitment_too_many_fields_panics() { }); } -// DEPRECATED -// #[test] -// fn set_commitment_rate_limit_exceeded() { -// new_test_ext().execute_with(|| { -// let rate_limit = ::DefaultRateLimit::get(); -// System::::set_block_number(1); -// let info = Box::new(CommitmentInfo { -// fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), -// }); - -// assert_ok!(Pallet::::set_commitment( -// RuntimeOrigin::signed(1), -// 1, -// info.clone() -// )); - -// // Set block number to just before rate limit expires -// 
System::::set_block_number(rate_limit); -// assert_noop!( -// Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), -// Error::::CommitmentSetRateLimitExceeded -// ); - -// // Set block number to after rate limit -// System::::set_block_number(rate_limit + 1); -// assert_ok!(Pallet::::set_commitment( -// RuntimeOrigin::signed(1), -// 1, -// info -// )); -// }); -// } - #[test] fn set_commitment_updates_deposit() { new_test_ext().execute_with(|| { @@ -226,22 +196,6 @@ fn set_commitment_updates_deposit() { }); } -#[test] -fn set_rate_limit_works() { - new_test_ext().execute_with(|| { - let default_rate_limit: u64 = ::DefaultRateLimit::get(); - assert_eq!(RateLimit::::get(), default_rate_limit); - - assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); - assert_eq!(RateLimit::::get(), 200); - - assert_noop!( - Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), - sp_runtime::DispatchError::BadOrigin - ); - }); -} - #[test] fn event_emission_works() { new_test_ext().execute_with(|| { diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 0f1d2302a5..a537514f61 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -58,6 +58,8 @@ pub enum Data { encrypted: BoundedVec>, reveal_round: u64, }, + /// Flag to trigger bonds reset for subnet + ResetBondsFlag, } impl Data { @@ -79,6 +81,7 @@ impl Data { | Data::Keccak256(arr) | Data::ShaThree256(arr) => arr.len() as u64, Data::TimelockEncrypted { encrypted, .. 
} => encrypted.len() as u64, + Data::ResetBondsFlag => 0, } } } @@ -108,6 +111,7 @@ impl Decode for Data { reveal_round, } } + 135 => Data::ResetBondsFlag, _ => return Err(codec::Error::from("invalid leading byte")), }) } @@ -136,6 +140,7 @@ impl Encode for Data { r.extend_from_slice(&reveal_round.encode()); r } + Data::ResetBondsFlag => vec![135], } } } @@ -158,7 +163,9 @@ impl TypeInfo for Data { type Identity = Self; fn type_info() -> Type { - let variants = Variants::new().variant("None", |v| v.index(0)); + let variants = Variants::new() + .variant("None", |v| v.index(0)) + .variant("ResetBondsFlag", |v| v.index(135)); // create a variant for all sizes of Raw data from 0-32 let variants = data_raw_variants!( @@ -321,7 +328,8 @@ impl TypeInfo for Data { }) .field(|f| f.name("reveal_round").ty::()), ) - }); + }) + .variant("ResetBondsFlag", |v| v.index(135)); Type::builder() .path(Path::new("Data", module_path!())) diff --git a/pallets/commitments/src/weights.rs b/pallets/commitments/src/weights.rs index b91017e050..e1bd05fcc7 100644 --- a/pallets/commitments/src/weights.rs +++ b/pallets/commitments/src/weights.rs @@ -53,7 +53,7 @@ impl WeightInfo for SubstrateWeight { fn set_rate_limit() -> Weight { Weight::from_parts(10_000_000, 2000) .saturating_add(RocksDbWeight::get().reads(1_u64)) - } + } } // For backwards compatibility and tests. 
@@ -76,5 +76,5 @@ impl WeightInfo for () { fn set_rate_limit() -> Weight { Weight::from_parts(10_000_000, 2000) .saturating_add(RocksDbWeight::get().reads(1_u64)) - } -} \ No newline at end of file + } +} diff --git a/pallets/crowdloan/Cargo.toml b/pallets/crowdloan/Cargo.toml index 1739a85b7c..e8d582fa44 100644 --- a/pallets/crowdloan/Cargo.toml +++ b/pallets/crowdloan/Cargo.toml @@ -21,6 +21,7 @@ frame-support.workspace = true frame-system.workspace = true sp-runtime.workspace = true sp-std.workspace = true +log = { workspace = true } [dev-dependencies] pallet-balances = { default-features = true, workspace = true } @@ -39,6 +40,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-io/std", + "log/std", "sp-core/std", "pallet-balances/std", "pallet-preimage/std", diff --git a/pallets/crowdloan/README.md b/pallets/crowdloan/README.md index f0b084b9ce..3d67fee33a 100644 --- a/pallets/crowdloan/README.md +++ b/pallets/crowdloan/README.md @@ -4,11 +4,11 @@ A pallet that enables the creation and management of generic crowdloans for tran Users of this pallet can create a crowdloan by providing a deposit, a cap, an end block, an optional target address and an optional call. -Users can contribute to a crowdloan by providing funds to the crowdloan they choose to support. +Users can contribute to a crowdloan by providing funds to the crowdloan they choose to support. The contribution can be withdrawn while the crowdloan is not finalized. Once the crowdloan is finalized, the funds will be transferred to the target address if provided; otherwise, the end user is expected to transfer them manually on-chain if the call is a pallet extrinsic. The call will be dispatched with the current crowdloan ID stored as a temporary item. -If the crowdloan fails to reach the cap, the initial deposit will be returned to the creator, and contributions will be refunded to the contributors. 
+If the crowdloan fails to reach the cap, the creator can decide to refund all contributors and dissolve the crowdloan. The initial deposit will be refunded. ## Overview diff --git a/pallets/crowdloan/src/benchmarking.rs b/pallets/crowdloan/src/benchmarking.rs index 5dab0c1b91..0891baf5af 100644 --- a/pallets/crowdloan/src/benchmarking.rs +++ b/pallets/crowdloan/src/benchmarking.rs @@ -68,6 +68,7 @@ mod benchmarks { target_address: Some(target_address.clone()), call: Some(T::Preimages::bound(*call).unwrap()), finalized: false, + contributors_count: 1, }) ); // ensure the creator has been deducted the deposit @@ -190,11 +191,7 @@ mod benchmarks { frame_system::Pallet::::set_block_number(end); #[extrinsic_call] - _( - RawOrigin::Signed(contributor.clone()), - contributor.clone(), - crowdloan_id, - ); + _(RawOrigin::Signed(contributor.clone()), crowdloan_id); // ensure the creator contribution has been removed assert_eq!(Contributions::::get(crowdloan_id, &contributor), None); @@ -310,9 +307,12 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Signed(creator.clone()), crowdloan_id); - // ensure the creator has been refunded and the contributions is removed - assert_eq!(CurrencyOf::::balance(&creator), deposit); - assert_eq!(Contributions::::get(crowdloan_id, &creator), None); + // ensure the creator has not been refunded and contribution is the actual initial deposit + assert_eq!(CurrencyOf::::balance(&creator), 0); + assert_eq!( + Contributions::::get(crowdloan_id, &creator), + Some(deposit) + ); // ensure each contributor has been refunded and the contributions is removed for i in 0..contributors { let contributor: T::AccountId = account::("contributor", i, SEED); @@ -322,10 +322,10 @@ mod benchmarks { // ensure the crowdloan account has been deducted the contributions assert_eq!( CurrencyOf::::balance(&Pallet::::funds_account(crowdloan_id)), - 0 + deposit ); // ensure the raised amount is updated correctly - assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| 
c.raised == 0)); + assert!(Crowdloans::::get(crowdloan_id).is_some_and(|c| c.raised == deposit)); // ensure the event is emitted assert_last_event::(Event::::AllRefunded { crowdloan_id }.into()); } diff --git a/pallets/crowdloan/src/lib.rs b/pallets/crowdloan/src/lib.rs index 5413bd689b..1d4ed4e263 100644 --- a/pallets/crowdloan/src/lib.rs +++ b/pallets/crowdloan/src/lib.rs @@ -7,7 +7,7 @@ extern crate alloc; -use alloc::{boxed::Box, vec, vec::Vec}; +use alloc::{boxed::Box, vec}; use codec::{Decode, Encode}; use frame_support::{ PalletId, @@ -25,6 +25,7 @@ use frame_support::{ use frame_system::pallet_prelude::*; use scale_info::TypeInfo; use sp_runtime::traits::CheckedSub; +use sp_std::vec::Vec; use weights::WeightInfo; pub use pallet::*; @@ -33,6 +34,7 @@ use subtensor_macros::freeze_struct; pub type CrowdloanId = u32; mod benchmarking; +mod migrations; mod mock; mod tests; pub mod weights; @@ -42,11 +44,14 @@ pub type CurrencyOf = ::Currency; pub type BalanceOf = as fungible::Inspect<::AccountId>>::Balance; +// Define a maximum length for the migration key +type MigrationKeyMaxLen = ConstU32<128>; + pub type BoundedCallOf = Bounded<::RuntimeCall, ::Hashing>; /// A struct containing the information about a crowdloan. -#[freeze_struct("6b86ccf70fc1b8f1")] +#[freeze_struct("5db9538284491545")] #[derive(Encode, Decode, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CrowdloanInfo { /// The creator of the crowdloan. @@ -71,6 +76,8 @@ pub struct CrowdloanInfo { pub call: Option, /// Whether the crowdloan has been finalized. pub finalized: bool, + /// The number of contributors to the crowdloan. + pub contributors_count: u32, } pub type CrowdloanInfoOf = CrowdloanInfo< @@ -134,6 +141,10 @@ pub mod pallet { /// The maximum number of contributors that can be refunded in a single refund. #[pallet::constant] type RefundContributorsLimit: Get; + + // The maximum number of contributors that can contribute to a crowdloan. 
+ #[pallet::constant] + type MaxContributors: Get; } /// A map of crowdloan ids to their information. @@ -162,6 +173,11 @@ pub mod pallet { #[pallet::storage] pub type CurrentCrowdloanId = StorageValue<_, CrowdloanId, OptionQuery>; + /// Storage for the migration run status. + #[pallet::storage] + pub type HasMigrationRun = + StorageMap<_, Identity, BoundedVec, bool, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -251,6 +267,23 @@ pub mod pallet { CallUnavailable, /// The crowdloan is not ready to be dissolved, it still has contributions. NotReadyToDissolve, + /// The deposit cannot be withdrawn from the crowdloan. + DepositCannotBeWithdrawn, + /// The maximum number of contributors has been reached. + MaxContributorsReached, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut weight = frame_support::weights::Weight::from_parts(0, 0); + + weight = weight + // Add the contributors count for each crowdloan + .saturating_add(migrations::migrate_add_contributors_count::()); + + weight + } } #[pallet::call] @@ -261,7 +294,7 @@ pub mod pallet { /// /// The initial deposit will be transfered to the crowdloan account and will be refunded /// in case the crowdloan fails to raise the cap. Additionally, the creator will pay for - /// the execution of the call + /// the execution of the call. /// /// The dispatch origin for this call must be _Signed_. 
/// @@ -340,6 +373,7 @@ pub mod pallet { target_address, call, finalized: false, + contributors_count: 1, }; Crowdloans::::insert(crowdloan_id, &crowdloan); @@ -396,6 +430,12 @@ pub mod pallet { Error::::ContributionTooLow ); + // Ensure the crowdloan has not reached the maximum number of contributors + ensure!( + crowdloan.contributors_count < T::MaxContributors::get(), + Error::::MaxContributorsReached + ); + // Ensure contribution does not overflow the actual raised amount // and it does not exceed the cap let left_to_raise = crowdloan @@ -413,11 +453,21 @@ pub mod pallet { .checked_add(amount) .ok_or(Error::::Overflow)?; - // Compute the new total contribution and ensure it does not overflow. - let contribution = Contributions::::get(crowdloan_id, &contributor) - .unwrap_or(Zero::zero()) - .checked_add(amount) - .ok_or(Error::::Overflow)?; + // Compute the new total contribution and ensure it does not overflow, we + // also increment the contributor count if the contribution is new. + let contribution = + if let Some(contribution) = Contributions::::get(crowdloan_id, &contributor) { + contribution + .checked_add(amount) + .ok_or(Error::::Overflow)? + } else { + // We have a new contribution + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_add(1) + .ok_or(Error::::Overflow)?; + amount + }; // Ensure contributor has enough balance to pay ensure!( @@ -446,47 +496,53 @@ pub mod pallet { /// Withdraw a contribution from an active (not yet finalized or dissolved) crowdloan. /// - /// The origin doesn't needs to be the contributor, it can be any account, - /// making it possible for someone to trigger a refund for a contributor. + /// Only contributions over the deposit can be withdrawn by the creator. /// /// The dispatch origin for this call must be _Signed_. /// /// Parameters: - /// - `contributor`: The contributor to withdraw from. /// - `crowdloan_id`: The id of the crowdloan to withdraw from. 
#[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::withdraw())] pub fn withdraw( origin: OriginFor, - contributor: T::AccountId, #[pallet::compact] crowdloan_id: CrowdloanId, ) -> DispatchResult { - ensure_signed(origin)?; + let who = ensure_signed(origin)?; let mut crowdloan = Self::ensure_crowdloan_exists(crowdloan_id)?; ensure!(!crowdloan.finalized, Error::::AlreadyFinalized); // Ensure contributor has balance left in the crowdloan account - let amount = - Contributions::::get(crowdloan_id, &contributor).unwrap_or_else(Zero::zero); + let mut amount = Contributions::::get(crowdloan_id, &who).unwrap_or_else(Zero::zero); ensure!(amount > Zero::zero(), Error::::NoContribution); + if who == crowdloan.creator { + // Ensure the deposit is kept + amount = amount.saturating_sub(crowdloan.deposit); + ensure!(amount > Zero::zero(), Error::::DepositCannotBeWithdrawn); + Contributions::::insert(crowdloan_id, &who, crowdloan.deposit); + } else { + Contributions::::remove(crowdloan_id, &who); + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_sub(1) + .ok_or(Error::::Underflow)?; + } + CurrencyOf::::transfer( &crowdloan.funds_account, - &contributor, + &who, amount, Preservation::Expendable, )?; - // Remove the contribution from the contributions map and update - // crowdloan raised amount to reflect the withdrawal. - Contributions::::remove(crowdloan_id, &contributor); + // Update the crowdloan raised amount to reflect the withdrawal. crowdloan.raised = crowdloan.raised.saturating_sub(amount); - Crowdloans::::insert(crowdloan_id, &crowdloan); Self::deposit_event(Event::::Withdrew { - contributor, + contributor: who, crowdloan_id, amount, }); @@ -570,7 +626,7 @@ pub mod pallet { /// Refund a failed crowdloan. /// - /// The call will try to refund all contributors up to the limit defined by the `RefundContributorsLimit`. 
+ /// The call will try to refund all contributors (excluding the creator) up to the limit defined by the `RefundContributorsLimit`. /// If the limit is reached, the call will stop and the crowdloan will be marked as partially refunded. /// It may be needed to dispatch this call multiple times to refund all contributors. /// @@ -595,9 +651,13 @@ pub mod pallet { let mut refunded_contributors: Vec = vec![]; let mut refund_count = 0; + // Assume everyone can be refunded let mut all_refunded = true; - let contributions = Contributions::::iter_prefix(crowdloan_id); + + // We try to refund all contributors (excluding the creator) + let contributions = Contributions::::iter_prefix(crowdloan_id) + .filter(|(contributor, _)| *contributor != crowdloan.creator); for (contributor, amount) in contributions { if refund_count >= T::RefundContributorsLimit::get() { // Not everyone can be refunded @@ -617,6 +677,10 @@ pub mod pallet { refund_count = refund_count.checked_add(1).ok_or(Error::::Overflow)?; } + crowdloan.contributors_count = crowdloan + .contributors_count + .checked_sub(refund_count) + .ok_or(Error::::Underflow)?; Crowdloans::::insert(crowdloan_id, &crowdloan); // Clear refunded contributors @@ -638,7 +702,7 @@ pub mod pallet { /// Dissolve a crowdloan. /// /// The crowdloan will be removed from the storage. - /// All contributions must have been refunded before the crowdloan can be dissolved. + /// All contributions must have been refunded before the crowdloan can be dissolved (except the creator's one). /// /// The dispatch origin for this call must be _Signed_ and must be the creator of the crowdloan. 
/// @@ -657,9 +721,24 @@ pub mod pallet { // Only the creator can dissolve the crowdloan ensure!(who == crowdloan.creator, Error::::InvalidOrigin); - // It can only be dissolved if the raised amount is 0, meaning - // there is no contributions or every contribution has been refunded - ensure!(crowdloan.raised == 0, Error::::NotReadyToDissolve); + + // It can only be dissolved if the raised amount is the creator's contribution, + // meaning there is no contributions or every contribution has been refunded + let creator_contribution = Contributions::::get(crowdloan_id, &crowdloan.creator) + .ok_or(Error::::NoContribution)?; + ensure!( + creator_contribution == crowdloan.raised, + Error::::NotReadyToDissolve + ); + + // Refund the creator's contribution + CurrencyOf::::transfer( + &crowdloan.funds_account, + &crowdloan.creator, + creator_contribution, + Preservation::Expendable, + )?; + Contributions::::remove(crowdloan_id, &crowdloan.creator); // Clear the call from the preimage storage if let Some(call) = crowdloan.call { @@ -698,7 +777,7 @@ pub mod pallet { // The new min contribution should be greater than absolute minimum contribution. ensure!( - new_min_contribution > T::AbsoluteMinimumContribution::get(), + new_min_contribution >= T::AbsoluteMinimumContribution::get(), Error::::MinimumContributionTooLow ); @@ -771,7 +850,7 @@ pub mod pallet { ensure!(who == crowdloan.creator, Error::::InvalidOrigin); // The new cap should be greater than the actual raised amount. 
- ensure!(new_cap > crowdloan.raised, Error::::CapTooLow); + ensure!(new_cap >= crowdloan.raised, Error::::CapTooLow); crowdloan.cap = new_cap; Crowdloans::::insert(crowdloan_id, &crowdloan); diff --git a/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs b/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs new file mode 100644 index 0000000000..3b094843ce --- /dev/null +++ b/pallets/crowdloan/src/migrations/migrate_add_contributors_count.rs @@ -0,0 +1,188 @@ +use alloc::string::String; +use frame_support::{BoundedVec, migration::storage_key_iter, traits::Get, weights::Weight}; +use subtensor_macros::freeze_struct; + +use crate::*; + +mod old_storage { + use super::*; + + #[freeze_struct("84bcbf9b8d3f0ddf")] + #[derive(Encode, Decode, Debug)] + pub struct OldCrowdloanInfo { + pub creator: AccountId, + pub deposit: Balance, + pub min_contribution: Balance, + pub end: BlockNumber, + pub cap: Balance, + pub funds_account: AccountId, + pub raised: Balance, + pub target_address: Option, + pub call: Option, + pub finalized: bool, + } +} + +pub fn migrate_add_contributors_count() -> Weight { + let migration_name = BoundedVec::truncate_from(b"migrate_add_contributors_count".to_vec()); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + let pallet_name = b"Crowdloan"; + let item_name = b"Crowdloans"; + let crowdloans = storage_key_iter::< + CrowdloanId, + old_storage::OldCrowdloanInfo< + T::AccountId, + BalanceOf, + BlockNumberFor, + BoundedCallOf, + >, + Twox64Concat, + >(pallet_name, item_name) + .collect::>(); + weight = weight.saturating_add(T::DbWeight::get().reads(crowdloans.len() as u64)); + + for (id, crowdloan) in crowdloans { + let contributions = Contributions::::iter_key_prefix(id) + .collect::>() + .len(); + weight = weight.saturating_add(T::DbWeight::get().reads(contributions as u64)); + + Crowdloans::::insert( + id, + CrowdloanInfo { + creator: crowdloan.creator, + deposit: crowdloan.deposit, + min_contribution: crowdloan.min_contribution, + end: crowdloan.end, + cap: crowdloan.cap, + funds_account: crowdloan.funds_account, + raised: crowdloan.raised, + target_address: crowdloan.target_address, + call: crowdloan.call, + finalized: crowdloan.finalized, + contributors_count: contributions as u32, + }, + ); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} + +#[cfg(test)] +mod tests { + use frame_support::{Hashable, storage::unhashed::put_raw}; + use sp_core::U256; + use sp_io::hashing::twox_128; + + use super::*; + use crate::mock::{Test, TestState}; + + #[test] + fn test_migrate_add_contributors_count_works() { + TestState::default().build_and_execute(|| { + let pallet_name = twox_128(b"Crowdloan"); + let storage_name = twox_128(b"Crowdloans"); + let prefix = [pallet_name, storage_name].concat(); + + let items = vec![ + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), 
+ deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![(U256::from(1), 100)], + ), + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), + deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![ + (U256::from(1), 100), + (U256::from(2), 100), + (U256::from(3), 100), + ], + ), + ( + old_storage::OldCrowdloanInfo { + creator: U256::from(1), + deposit: 100u64, + min_contribution: 10u64, + end: 100u64, + cap: 1000u64, + funds_account: U256::from(2), + raised: 0u64, + target_address: None, + call: None::>, + finalized: false, + }, + vec![ + (U256::from(1), 100), + (U256::from(2), 100), + (U256::from(3), 100), + (U256::from(4), 100), + (U256::from(5), 100), + ], + ), + ]; + + for (id, (crowdloan, contributions)) in items.into_iter().enumerate() { + let key = [prefix.clone(), (id as u32).twox_64_concat()].concat(); + put_raw(&key, &crowdloan.encode()); + + for (contributor, amount) in contributions { + Contributions::::insert(id as u32, contributor, amount); + } + } + + migrate_add_contributors_count::(); + + assert!(Crowdloans::::get(0).is_some_and(|c| c.contributors_count == 1)); + assert!(Crowdloans::::get(1).is_some_and(|c| c.contributors_count == 3)); + assert!(Crowdloans::::get(2).is_some_and(|c| c.contributors_count == 5)); + + assert!(HasMigrationRun::::get(BoundedVec::truncate_from( + b"migrate_add_contributors_count".to_vec() + ))); + }); + } +} diff --git a/pallets/crowdloan/src/migrations/mod.rs b/pallets/crowdloan/src/migrations/mod.rs new file mode 100644 index 0000000000..f6701fb83a --- /dev/null +++ b/pallets/crowdloan/src/migrations/mod.rs @@ -0,0 +1,2 @@ +mod migrate_add_contributors_count; +pub use migrate_add_contributors_count::*; diff --git 
a/pallets/crowdloan/src/mock.rs b/pallets/crowdloan/src/mock.rs index 980b9fa26b..78cf15717c 100644 --- a/pallets/crowdloan/src/mock.rs +++ b/pallets/crowdloan/src/mock.rs @@ -111,6 +111,7 @@ parameter_types! { pub const MinimumBlockDuration: u64 = 20; pub const MaximumBlockDuration: u64 = 100; pub const RefundContributorsLimit: u32 = 5; + pub const MaxContributors: u32 = 10; } impl pallet_crowdloan::Config for Test { @@ -125,6 +126,7 @@ impl pallet_crowdloan::Config for Test { type MinimumBlockDuration = MinimumBlockDuration; type MaximumBlockDuration = MaximumBlockDuration; type RefundContributorsLimit = RefundContributorsLimit; + type MaxContributors = MaxContributors; } // A test pallet used to test some behavior of the crowdloan pallet diff --git a/pallets/crowdloan/src/tests.rs b/pallets/crowdloan/src/tests.rs index 59bfea6b83..1e03854b1f 100644 --- a/pallets/crowdloan/src/tests.rs +++ b/pallets/crowdloan/src/tests.rs @@ -1,7 +1,7 @@ #![cfg(test)] #![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] -use frame_support::{assert_err, assert_ok, traits::StorePreimage}; +use frame_support::{StorageDoubleMap, assert_err, assert_ok, traits::StorePreimage}; use frame_system::pallet_prelude::BlockNumberFor; use sp_core::U256; use sp_runtime::DispatchError; @@ -46,6 +46,7 @@ fn test_create_succeeds() { target_address: None, call: Some(call), finalized: false, + contributors_count: 1, }) ); // ensure the crowdloan account has the deposit @@ -330,8 +331,15 @@ fn test_contribute_succeeds() { // run some blocks run_to_block(10); - // first contribution to the crowdloan from creator let crowdloan_id: CrowdloanId = 0; + + // only the creator has contributed so far + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + + // first contribution to the crowdloan from creator let amount: BalanceOf = 50; assert_ok!(Crowdloan::contribute( RuntimeOrigin::signed(creator), @@ -351,6 +359,10 @@ fn 
test_contribute_succeeds() { pallet_crowdloan::Contributions::::get(crowdloan_id, creator), Some(100) ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); assert_eq!( Balances::free_balance(creator), 200 - amount - initial_deposit @@ -377,6 +389,10 @@ fn test_contribute_succeeds() { pallet_crowdloan::Contributions::::get(crowdloan_id, contributor1), Some(100) ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); assert_eq!(Balances::free_balance(contributor1), 500 - amount); // third contribution to the crowdloan @@ -400,6 +416,10 @@ fn test_contribute_succeeds() { pallet_crowdloan::Contributions::::get(crowdloan_id, contributor2), Some(50) ); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 3) + ); assert_eq!(Balances::free_balance(contributor2), 200 - amount); // ensure the contributions are present in the funds account @@ -656,6 +676,62 @@ fn test_contribute_fails_if_contribution_is_below_minimum_contribution() { }); } +#[test] +fn test_contribute_fails_if_max_contributors_has_been_reached() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) + .with_balance(U256::from(3), 100) + .with_balance(U256::from(4), 100) + .with_balance(U256::from(5), 100) + .with_balance(U256::from(6), 100) + .with_balance(U256::from(7), 100) + .with_balance(U256::from(8), 100) + .with_balance(U256::from(9), 100) + .with_balance(U256::from(10), 100) + .with_balance(U256::from(11), 100) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 1000; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + 
)); + + // run some blocks + run_to_block(10); + + // contribute to the crowdloan + let crowdloan_id: CrowdloanId = 0; + let amount: BalanceOf = 20; + for i in 2..=10 { + let contributor: AccountOf = U256::from(i); + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + } + + // try to contribute + let contributor: AccountOf = U256::from(10); + assert_err!( + Crowdloan::contribute(RuntimeOrigin::signed(contributor), crowdloan_id, amount), + pallet_crowdloan::Error::::MaxContributorsReached + ); + }); +} + #[test] fn test_contribute_fails_if_contributor_has_insufficient_balance() { TestState::default() @@ -695,7 +771,7 @@ fn test_contribute_fails_if_contributor_has_insufficient_balance() { } #[test] -fn test_withdraw_succeeds() { +fn test_withdraw_from_contributor_succeeds() { TestState::default() .with_balance(U256::from(1), 100) .with_balance(U256::from(2), 100) @@ -723,65 +799,87 @@ fn test_withdraw_succeeds() { // contribute to the crowdloan let crowdloan_id: CrowdloanId = 0; - let contributor: AccountOf = U256::from(2); - let amount: BalanceOf = 100; + let contributor1: AccountOf = U256::from(2); + let amount1: BalanceOf = 100; assert_ok!(Crowdloan::contribute( - RuntimeOrigin::signed(contributor), + RuntimeOrigin::signed(contributor1), crowdloan_id, - amount + amount1 + )); + + let contributor2: AccountOf = U256::from(3); + let amount2: BalanceOf = 100; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor2), + crowdloan_id, + amount2 )); // run some more blocks past the end of the contribution period run_to_block(60); - // withdraw from creator + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 3) + ); + + // withdraw from contributor1 assert_ok!(Crowdloan::withdraw( - RuntimeOrigin::signed(creator), - creator, + RuntimeOrigin::signed(contributor1), crowdloan_id )); - // ensure the creator 
contribution has been removed + // ensure the contributor1 contribution has been removed assert_eq!( - pallet_crowdloan::Contributions::::get(crowdloan_id, creator), + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor1), None, ); - // ensure the creator has the correct amount - assert_eq!(pallet_balances::Pallet::::free_balance(creator), 100); + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); + // ensure the contributor1 has the correct amount + assert_eq!( + pallet_balances::Pallet::::free_balance(contributor1), + 100 + ); - // withdraw from contributor + // withdraw from contributor2 assert_ok!(Crowdloan::withdraw( - RuntimeOrigin::signed(contributor), - contributor, + RuntimeOrigin::signed(contributor2), crowdloan_id )); - // ensure the creator contribution has been removed + // ensure the contributor2 contribution has been removed assert_eq!( - pallet_crowdloan::Contributions::::get(crowdloan_id, contributor), + pallet_crowdloan::Contributions::::get(crowdloan_id, contributor2), None, ); - // ensure the contributor has the correct amount + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + // ensure the contributor2 has the correct amount assert_eq!( - pallet_balances::Pallet::::free_balance(contributor), + pallet_balances::Pallet::::free_balance(contributor2), 100 ); // ensure the crowdloan account has the correct amount let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); - assert_eq!(Balances::free_balance(funds_account), 0); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); // ensure the crowdloan raised amount is updated correctly assert!( pallet_crowdloan::Crowdloans::::get(crowdloan_id) - .is_some_and(|c| c.raised == 0) + .is_some_and(|c| c.raised == initial_deposit) ); }); } #[test] -fn test_withdraw_succeeds_for_another_contributor() { +fn 
test_withdraw_from_creator_with_contribution_over_deposit_succeeds() { TestState::default() - .with_balance(U256::from(1), 100) - .with_balance(U256::from(2), 100) + .with_balance(U256::from(1), 200) .build_and_execute(|| { // create a crowdloan let creator: AccountOf = U256::from(1); @@ -800,44 +898,92 @@ fn test_withdraw_succeeds_for_another_contributor() { None )); - // run some blocks - run_to_block(10); - - // contribute to the crowdloan + // contribute to the crowdloan as the creator let crowdloan_id: CrowdloanId = 0; - let contributor: AccountOf = U256::from(2); - let amount: BalanceOf = 100; + let amount: BalanceOf = 100; assert_ok!(Crowdloan::contribute( - RuntimeOrigin::signed(contributor), + RuntimeOrigin::signed(creator), crowdloan_id, amount )); - // run some more blocks past the end of the contribution period - run_to_block(60); + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); - // withdraw for creator as a contributor + // withdraw + let crowdloan_id: CrowdloanId = 0; assert_ok!(Crowdloan::withdraw( - RuntimeOrigin::signed(contributor), - creator, + RuntimeOrigin::signed(creator), crowdloan_id )); + // ensure the creator has the correct amount - assert_eq!(pallet_balances::Pallet::::free_balance(creator), 100); - // ensure the contributor has the correct amount assert_eq!( - pallet_balances::Pallet::::free_balance(contributor), - 0 + pallet_balances::Pallet::::free_balance(creator), + 200 - initial_deposit + ); + // ensure the creator contribution has been removed + assert_eq!( + pallet_crowdloan::Contributions::::get(crowdloan_id, creator), + Some(initial_deposit), + ); + // ensure the contributor count hasn't changed because deposit is kept + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) ); // ensure the crowdloan account has the correct amount let funds_account = 
pallet_crowdloan::Pallet::::funds_account(crowdloan_id); - assert_eq!(Balances::free_balance(funds_account), 100); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); // ensure the crowdloan raised amount is updated correctly assert!( pallet_crowdloan::Crowdloans::::get(crowdloan_id) - .is_some_and(|c| c.raised == 100) + .is_some_and(|c| c.raised == initial_deposit) + ); + }); +} +#[test] +fn test_withdraw_fails_from_creator_with_no_contribution_over_deposit() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // try to withdraw + let crowdloan_id: CrowdloanId = 0; + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(creator), crowdloan_id), + pallet_crowdloan::Error::::DepositCannotBeWithdrawn + ); + + // ensure the crowdloan account has the correct amount + let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); + assert_eq!(Balances::free_balance(funds_account), initial_deposit); + // ensure the crowdloan raised amount is updated correctly + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.raised == initial_deposit) ); }); } @@ -848,12 +994,12 @@ fn test_withdraw_fails_if_bad_origin() { let crowdloan_id: CrowdloanId = 0; assert_err!( - Crowdloan::withdraw(RuntimeOrigin::none(), U256::from(1), crowdloan_id), + Crowdloan::withdraw(RuntimeOrigin::none(), crowdloan_id), DispatchError::BadOrigin ); assert_err!( - Crowdloan::withdraw(RuntimeOrigin::root(), U256::from(1), crowdloan_id), + Crowdloan::withdraw(RuntimeOrigin::root(), crowdloan_id), 
DispatchError::BadOrigin ); }); @@ -866,47 +1012,39 @@ fn test_withdraw_fails_if_crowdloan_does_not_exists() { let crowdloan_id: CrowdloanId = 0; assert_err!( - Crowdloan::withdraw( - RuntimeOrigin::signed(contributor), - contributor, - crowdloan_id - ), + Crowdloan::withdraw(RuntimeOrigin::signed(contributor), crowdloan_id), pallet_crowdloan::Error::::InvalidCrowdloanId ); }); } #[test] -fn test_withdraw_fails_if_no_contribution_exists() { +fn test_withdraw_fails_if_crowdloan_has_already_been_finalized() { TestState::default() .with_balance(U256::from(1), 100) .with_balance(U256::from(2), 200) - .with_balance(U256::from(3), 100) .build_and_execute(|| { // create a crowdloan let creator: AccountOf = U256::from(1); - let initial_deposit: BalanceOf = 50; + let deposit: BalanceOf = 50; let min_contribution: BalanceOf = 10; - let cap: BalanceOf = 300; + let cap: BalanceOf = 100; let end: BlockNumberFor = 50; assert_ok!(Crowdloan::create( RuntimeOrigin::signed(creator), - initial_deposit, + deposit, min_contribution, cap, end, Some(noop_call()), - None + None, )); - // run some blocks - run_to_block(10); - - // contribute to the crowdloan - let contributor: AccountOf = U256::from(2); + // some contribution let crowdloan_id: CrowdloanId = 0; - let amount: BalanceOf = 100; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; assert_ok!(Crowdloan::contribute( RuntimeOrigin::signed(contributor), @@ -917,14 +1055,51 @@ fn test_withdraw_fails_if_no_contribution_exists() { // run some more blocks past the end of the contribution period run_to_block(60); + // finalize the crowdloan + assert_ok!(Crowdloan::finalize( + RuntimeOrigin::signed(creator), + crowdloan_id + )); + // try to withdraw - let contributor2: AccountOf = U256::from(3); assert_err!( - Crowdloan::withdraw( - RuntimeOrigin::signed(contributor2), - contributor2, - crowdloan_id - ), + Crowdloan::withdraw(RuntimeOrigin::signed(creator), crowdloan_id), + 
pallet_crowdloan::Error::::AlreadyFinalized + ); + }); +} + +#[test] +fn test_withdraw_fails_if_no_contribution_exists() { + TestState::default() + .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 200) + .build_and_execute(|| { + // create a crowdloan + let creator: AccountOf = U256::from(1); + let initial_deposit: BalanceOf = 50; + let min_contribution: BalanceOf = 10; + let cap: BalanceOf = 300; + let end: BlockNumberFor = 50; + + assert_ok!(Crowdloan::create( + RuntimeOrigin::signed(creator), + initial_deposit, + min_contribution, + cap, + end, + Some(noop_call()), + None + )); + + // run some more blocks past the end of the contribution period + run_to_block(60); + + // try to withdraw + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + assert_err!( + Crowdloan::withdraw(RuntimeOrigin::signed(contributor), crowdloan_id), pallet_crowdloan::Error::::NoContribution ); }); @@ -1404,6 +1579,12 @@ fn test_refund_succeeds() { )); } + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 7) + ); + // run some more blocks past the end of the contribution period run_to_block(60); @@ -1413,6 +1594,12 @@ fn test_refund_succeeds() { crowdloan_id )); + // ensure the contributor count is correct, we processed 5 refunds + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 2) + ); + // ensure the crowdloan account has the correct amount let funds_account = pallet_crowdloan::Pallet::::funds_account(crowdloan_id); assert_eq!(Balances::free_balance(funds_account), 350 - 5 * amount); @@ -1436,19 +1623,29 @@ fn test_refund_succeeds() { crowdloan_id )); + // ensure the contributor count is correct, we processed 1 more refund + // keeping deposit + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); + // ensure the crowdloan account 
has the correct amount assert_eq!( pallet_balances::Pallet::::free_balance(funds_account), - 0 + initial_deposit ); // ensure the raised amount is updated correctly assert!( pallet_crowdloan::Crowdloans::::get(crowdloan_id) - .is_some_and(|c| c.raised == 0) + .is_some_and(|c| c.raised == initial_deposit) ); // ensure creator has the correct amount - assert_eq!(pallet_balances::Pallet::::free_balance(creator), 100); + assert_eq!( + pallet_balances::Pallet::::free_balance(creator), + initial_deposit + ); // ensure each contributor has been refunded and removed from the crowdloan for i in 2..8 { @@ -1561,15 +1758,15 @@ fn test_dissolve_succeeds() { // run some blocks past end run_to_block(60); - // refund the contributions let crowdloan_id: CrowdloanId = 0; - assert_ok!(Crowdloan::refund( - RuntimeOrigin::signed(creator), - crowdloan_id - )); + + // ensure the contributor count is correct + assert!( + pallet_crowdloan::Crowdloans::::get(crowdloan_id) + .is_some_and(|c| c.contributors_count == 1) + ); // dissolve the crowdloan - let crowdloan_id: CrowdloanId = 0; assert_ok!(Crowdloan::dissolve( RuntimeOrigin::signed(creator), crowdloan_id @@ -1578,6 +1775,11 @@ fn test_dissolve_succeeds() { // ensure the crowdloan is removed from the crowdloans map assert!(pallet_crowdloan::Crowdloans::::get(crowdloan_id).is_none()); + // ensure the contributions are removed + assert!(!pallet_crowdloan::Contributions::::contains_prefix( + crowdloan_id + )); + // ensure the event is emitted assert_eq!( last_event(), @@ -1706,6 +1908,7 @@ fn test_dissolve_fails_if_origin_is_not_creator() { fn test_dissolve_fails_if_not_everyone_has_been_refunded() { TestState::default() .with_balance(U256::from(1), 100) + .with_balance(U256::from(2), 100) .build_and_execute(|| { // create a crowdloan let creator: AccountOf = U256::from(1); @@ -1724,7 +1927,20 @@ fn test_dissolve_fails_if_not_everyone_has_been_refunded() { None, )); - // run some blocks past end + // run some blocks + run_to_block(10); + 
+ // some contribution + let crowdloan_id: CrowdloanId = 0; + let contributor: AccountOf = U256::from(2); + let amount: BalanceOf = 50; + assert_ok!(Crowdloan::contribute( + RuntimeOrigin::signed(contributor), + crowdloan_id, + amount + )); + + // run some blocks run_to_block(10); // try to dissolve the crowdloan @@ -1932,7 +2148,7 @@ fn test_update_min_contribution_fails_if_new_min_contribution_is_too_low() { )); let crowdloan_id: CrowdloanId = 0; - let new_min_contribution: BalanceOf = 10; + let new_min_contribution: BalanceOf = 9; // try update the min contribution assert_err!( @@ -2389,7 +2605,7 @@ fn test_update_cap_fails_if_new_cap_is_too_low() { // try update the cap let crowdloan_id: CrowdloanId = 0; - let new_cap: BalanceOf = 50; + let new_cap: BalanceOf = 49; assert_err!( Crowdloan::update_cap(RuntimeOrigin::signed(creator), crowdloan_id, new_cap), pallet_crowdloan::Error::::CapTooLow diff --git a/pallets/crowdloan/src/weights.rs b/pallets/crowdloan/src/weights.rs index 988f7a4efa..927e078d34 100644 --- a/pallets/crowdloan/src/weights.rs +++ b/pallets/crowdloan/src/weights.rs @@ -2,7 +2,7 @@ //! Autogenerated weights for `pallet_crowdloan` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 43.0.0 -//! DATE: 2025-04-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `Ubuntu-2404-noble-amd64-base`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` @@ -57,8 +57,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `156` // Estimated: `6148` - // Minimum execution time: 40_556_000 picoseconds. - Weight::from_parts(41_318_000, 6148) + // Minimum execution time: 42_128_000 picoseconds. 
+ Weight::from_parts(42_930_000, 6148) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -72,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `476` // Estimated: `6148` - // Minimum execution time: 42_900_000 picoseconds. - Weight::from_parts(43_682_000, 6148) + // Minimum execution time: 43_161_000 picoseconds. + Weight::from_parts(44_192_000, 6148) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `436` // Estimated: `6148` - // Minimum execution time: 41_037_000 picoseconds. - Weight::from_parts(41_968_000, 6148) + // Minimum execution time: 40_235_000 picoseconds. + Weight::from_parts(40_907_000, 6148) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -104,44 +104,45 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `376` // Estimated: `6148` - // Minimum execution time: 41_567_000 picoseconds. - Weight::from_parts(42_088_000, 6148) + // Minimum execution time: 40_986_000 picoseconds. 
+ Weight::from_parts(41_858_000, 6148) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) - /// Storage: `Crowdloan::Contributions` (r:51 w:50) + /// Storage: `Crowdloan::Contributions` (r:51 w:49) /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:51 w:51) + /// Storage: `System::Account` (r:50 w:50) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) /// The range of component `k` is `[3, 50]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `440 + k * (48 ±0)` + // Measured: `372 + k * (49 ±0)` // Estimated: `3743 + k * (2579 ±0)` - // Minimum execution time: 97_612_000 picoseconds. - Weight::from_parts(36_327_787, 3743) - // Standard Error: 81_635 - .saturating_add(Weight::from_parts(25_989_645, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 78_938_000 picoseconds. 
+ Weight::from_parts(2_729_302, 3743) + // Standard Error: 351_422 + .saturating_add(Weight::from_parts(31_033_274, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 2579).saturating_mul(k.into())) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `Crowdloan::Contributions` (r:1 w:0) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `321` - // Estimated: `3743` - // Minimum execution time: 11_832_000 picoseconds. - Weight::from_parts(12_293_000, 3743) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `450` + // Estimated: `6148` + // Minimum execution time: 43_341_000 picoseconds. + Weight::from_parts(44_402_000, 6148) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) @@ -149,8 +150,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 8_776_000 picoseconds. - Weight::from_parts(9_057_000, 3743) + // Minimum execution time: 8_876_000 picoseconds. 
+ Weight::from_parts(9_137_000, 3743) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -160,8 +161,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 9_067_000 picoseconds. - Weight::from_parts(9_368_000, 3743) + // Minimum execution time: 9_117_000 picoseconds. + Weight::from_parts(9_438_000, 3743) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -171,8 +172,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 8_636_000 picoseconds. - Weight::from_parts(9_027_000, 3743) + // Minimum execution time: 8_766_000 picoseconds. + Weight::from_parts(9_087_000, 3743) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -192,8 +193,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `156` // Estimated: `6148` - // Minimum execution time: 40_556_000 picoseconds. - Weight::from_parts(41_318_000, 6148) + // Minimum execution time: 42_128_000 picoseconds. + Weight::from_parts(42_930_000, 6148) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -207,8 +208,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `476` // Estimated: `6148` - // Minimum execution time: 42_900_000 picoseconds. - Weight::from_parts(43_682_000, 6148) + // Minimum execution time: 43_161_000 picoseconds. + Weight::from_parts(44_192_000, 6148) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -222,8 +223,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `436` // Estimated: `6148` - // Minimum execution time: 41_037_000 picoseconds. 
- Weight::from_parts(41_968_000, 6148) + // Minimum execution time: 40_235_000 picoseconds. + Weight::from_parts(40_907_000, 6148) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -239,44 +240,45 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `376` // Estimated: `6148` - // Minimum execution time: 41_567_000 picoseconds. - Weight::from_parts(42_088_000, 6148) + // Minimum execution time: 40_986_000 picoseconds. + Weight::from_parts(41_858_000, 6148) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) - /// Storage: `Crowdloan::Contributions` (r:51 w:50) + /// Storage: `Crowdloan::Contributions` (r:51 w:49) /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:51 w:51) + /// Storage: `System::Account` (r:50 w:50) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) /// The range of component `k` is `[3, 50]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `440 + k * (48 ±0)` + // Measured: `372 + k * (49 ±0)` // Estimated: `3743 + k * (2579 ±0)` - // Minimum execution time: 97_612_000 picoseconds. - Weight::from_parts(36_327_787, 3743) - // Standard Error: 81_635 - .saturating_add(Weight::from_parts(25_989_645, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 78_938_000 picoseconds. 
+ Weight::from_parts(2_729_302, 3743) + // Standard Error: 351_422 + .saturating_add(Weight::from_parts(31_033_274, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 2579).saturating_mul(k.into())) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `Crowdloan::Contributions` (r:1 w:0) + /// Proof: `Crowdloan::Contributions` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(104), added: 2579, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `321` - // Estimated: `3743` - // Minimum execution time: 11_832_000 picoseconds. - Weight::from_parts(12_293_000, 3743) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `450` + // Estimated: `6148` + // Minimum execution time: 43_341_000 picoseconds. + Weight::from_parts(44_402_000, 6148) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Crowdloan::Crowdloans` (r:1 w:1) /// Proof: `Crowdloan::Crowdloans` (`max_values`: None, `max_size`: Some(278), added: 2753, mode: `MaxEncodedLen`) @@ -284,8 +286,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 8_776_000 picoseconds. - Weight::from_parts(9_057_000, 3743) + // Minimum execution time: 8_876_000 picoseconds. 
+ Weight::from_parts(9_137_000, 3743) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -295,8 +297,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 9_067_000 picoseconds. - Weight::from_parts(9_368_000, 3743) + // Minimum execution time: 9_117_000 picoseconds. + Weight::from_parts(9_438_000, 3743) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -306,8 +308,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `224` // Estimated: `3743` - // Minimum execution time: 8_636_000 picoseconds. - Weight::from_parts(9_027_000, 3743) + // Minimum execution time: 8_766_000 picoseconds. + Weight::from_parts(9_087_000, 3743) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 40bf7ccb9b..dd172befc0 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -73,8 +73,6 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod weights; -pub use weights::*; /// the main drand api endpoint const ENDPOINTS: [&str; 5] = [ @@ -162,8 +160,6 @@ pub mod pallet { type AuthorityId: AppCrypto; /// The overarching runtime event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// A type representing the weights required by the dispatchables of this pallet. - type WeightInfo: WeightInfo; /// something that knows how to verify beacon pulses type Verifier: Verifier; /// A configuration for base priority of unsigned transactions. 
@@ -309,7 +305,9 @@ pub mod pallet { impl Pallet { /// Verify and write a pulse from the beacon into the runtime #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::write_pulse(pulses_payload.pulses.len() as u32))] + #[pallet::weight(Weight::from_parts(5_708_000_000, 0) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)))] pub fn write_pulse( origin: OriginFor, pulses_payload: PulsesPayload>, @@ -363,7 +361,9 @@ pub mod pallet { /// * `origin`: the root user /// * `config`: the beacon configuration #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::set_beacon_config())] + #[pallet::weight(Weight::from_parts(9_878_000, 0) + .saturating_add(T::DbWeight::get().reads(0_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)))] pub fn set_beacon_config( origin: OriginFor, config_payload: BeaconConfigurationPayload>, diff --git a/pallets/drand/src/mock.rs b/pallets/drand/src/mock.rs index ba9e16e6f4..6ef1f2bf8a 100644 --- a/pallets/drand/src/mock.rs +++ b/pallets/drand/src/mock.rs @@ -88,7 +88,6 @@ parameter_types! { impl pallet_drand_bridge::Config for Test { type AuthorityId = crypto::TestAuthId; type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand_bridge::weights::SubstrateWeight; type Verifier = QuicknetVerifier; type UnsignedPriority = UnsignedPriority; type HttpFetchTimeout = ConstU64<1_000>; diff --git a/pallets/drand/src/weights.rs b/pallets/drand/src/weights.rs deleted file mode 100644 index 6ab6e2905d..0000000000 --- a/pallets/drand/src/weights.rs +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2024 by Ideal Labs, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//! Autogenerated weights for pallet_template -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// ../../target/release/node-template -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_template -// --extrinsic -// * -// --steps=50 -// --repeat=20 -// --wasm-execution=compiled -// --output -// pallets/template/src/weights.rs -// --template -// ../../.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for pallet_template. -pub trait WeightInfo { - fn write_pulse(pulses_count: u32) -> Weight; - fn set_beacon_config() -> Weight; -} - -/// Weights for pallet_template using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `Drand::BeaconConfig` (r:0 w:1) - /// Proof: `Drand::BeaconConfig` (`max_values`: Some(1), `max_size`: Some(238), added: 733, mode: `MaxEncodedLen`) - /// Storage: `Drand::NextUnsignedAt` (r:0 w:1) - /// Proof: `Drand::NextUnsignedAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_beacon_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_000_000 picoseconds. - Weight::from_parts(8_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Drand::BeaconConfig` (r:1 w:0) - /// Proof: `Drand::BeaconConfig` (`max_values`: Some(1), `max_size`: Some(238), added: 733, mode: `MaxEncodedLen`) - fn write_pulse(pulses_count: u32) -> Weight { - // Adjust the weight calculation based on pulses_count - Weight::from_parts(6_000_000 * pulses_count as u64, 0) - .saturating_add(Weight::from_parts(0, 1723 * pulses_count as u64)) - .saturating_add(T::DbWeight::get().reads_writes(1, pulses_count as u64)) - } -} diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index b5ff1197e0..717722c479 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -5,7 +5,7 @@ use crate::Pallet as Subtensor; use crate::*; use codec::Compact; -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; pub use pallet::*; @@ -16,1276 +16,1473 @@ use sp_runtime::{ }; use sp_std::vec; -benchmarks! { - // Add individual benchmarks here - benchmark_register { - let netuid: u16 = 1; //11 is the benchmark network. 
- let tempo: u16 = 1; - let modality: u16 = 0; - let hotkey: T::AccountId = account("Alice", 0, 1); - let coldkey: T::AccountId = account("Test", 0, 2); - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); - - - }: register( RawOrigin::Signed( hotkey.clone() ), netuid, block_number, nonce, work, hotkey.clone(), coldkey.clone() ) - - benchmark_set_weights { - - // This is a whitelisted caller who can make transaction without weights. - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_network_registration_allowed( netuid, true ); - Subtensor::::set_max_registrations_per_block( netuid, 4096 ); - Subtensor::::set_target_registrations_per_interval( netuid, 4096 ); - - let mut seed : u32 = 1; - let mut dests: Vec = vec![]; - let mut weights: Vec = vec![]; - let signer : T::AccountId = account("Alice", 0, seed); - - for id in 0..4096_u16 { - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); - seed += 1; +#[frame_benchmarking::v2::benchmarks] +mod pallet_benchmarks { + use super::*; + + #[benchmark] + fn register() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let hotkey: T::AccountId = account("Alice", 0, 1); + let coldkey: T::AccountId = account("Test", 0, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = 
Subtensor::::get_current_block_as_u64(); + let (nonce, work): (u64, Vec) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + block_number, + nonce, + work, + hotkey.clone(), + coldkey.clone(), + ); + } + + #[benchmark] + fn set_weights() { + let netuid: u16 = 1; + let version_key: u64 = 1; + let tempo: u16 = 1; + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_max_allowed_uids(netuid, 4096); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_registrations_per_block(netuid, 4096); + Subtensor::::set_target_registrations_per_interval(netuid, 4096); + + let mut seed: u32 = 1; + let mut dests = Vec::new(); + let mut weights = Vec::new(); + let signer: T::AccountId = account("Alice", 0, seed); + + for _ in 0..4096 { + let hotkey: T::AccountId = account("Alice", 0, seed); + let coldkey: T::AccountId = account("Test", 0, seed); + seed += 1; + + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + let uid = Subtensor::::get_uid_for_net_and_hotkey(netuid, &hotkey).unwrap(); + Subtensor::::set_validator_permit_for_uid(netuid, uid, true); + + dests.push(uid); + weights.push(uid); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(signer.clone()), + netuid, + dests, + weights, + version_key, + ); + } + + #[benchmark] + fn become_delegate() { + let netuid: u16 = 1; + let tempo: u16 = 1; + Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + 
Subtensor::::set_max_allowed_uids(netuid, 4096); + Subtensor::::set_network_registration_allowed(netuid, true); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + let amount_to_be_staked: u64 = 1_000_000_000; + + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } + + #[benchmark] + fn add_stake() { + let netuid: u16 = 1; + let tempo: u16 = 1; + + Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + let total_stake: u64 = 1_000_000_000; + let amount: u64 = 60_000_000; + + Subtensor::::add_balance_to_coldkey_account(&coldkey, total_stake); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount, + ); + } + + #[benchmark] + fn add_stake_aggregate() { + let netuid: u16 = 1; + let tempo: u16 = 1; + + Subtensor::::init_new_network(netuid, tempo); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + let total_stake: u64 = 1_000_000_000; + let amount: u64 = 600_000; + + 
Subtensor::::add_balance_to_coldkey_account(&coldkey, total_stake); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount, + ); + } + + #[benchmark] + fn remove_stake_limit_aggregate() { + let netuid: u16 = 1; + + Subtensor::::increase_total_stake(1_000_000_000_000); + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::set_burn(netuid, 1); + + let limit: u64 = 1_000_000_000; + let tao_reserve: u64 = 150_000_000_000; + let alpha_in: u64 = 100_000_000_000; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + let wallet_bal: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, wallet_bal); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::add_balance_to_coldkey_account(&coldkey, 100_000_000_000u64); + assert_ok!(Subtensor::::add_stake( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + 100_000_000_000u64 + )); + + let amount_unstaked: u64 = 30_000_000_000; + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount_unstaked, + limit, + false, + ); + } + + #[benchmark] + fn remove_stake_aggregate() { + let netuid: u16 = 1; + + Subtensor::::increase_total_stake(1_000_000_000_000); + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let 
seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::set_burn(netuid, 1); + + let wallet_bal: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, wallet_bal); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::add_balance_to_coldkey_account(&coldkey, 100_000_000_000u64); + assert_ok!(Subtensor::::add_stake( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + 100_000_000_000u64 + )); + + let amount_unstaked: u64 = 600_000; + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount_unstaked, + ); + } + + #[benchmark] + fn add_stake_limit_aggregate() { + let netuid: u16 = 1; + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + let amount: u64 = 900_000_000_000; + let limit: u64 = 6_000_000_000; + let stake_amt: u64 = 440_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + let tao_reserve: u64 = 150_000_000_000; + let alpha_in: u64 = 100_000_000_000; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + stake_amt, + limit, + false, + ); + } + + #[benchmark] + fn serve_axon() { + let netuid: u16 = 1; + let caller: T::AccountId = whitelisted_caller(); + let version: u32 = 2; + let ip: u128 = 
1676056785; + let port: u16 = 128; + let ip_type: u8 = 4; + let protocol: u8 = 0; + let placeholder1: u8 = 0; + let placeholder2: u8 = 0; + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + Subtensor::::set_serving_rate_limit(netuid, 0); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + ); + } + + #[benchmark] + fn serve_prometheus() { + let netuid: u16 = 1; + let caller: T::AccountId = whitelisted_caller(); + let version: u32 = 2; + let ip: u128 = 1676056785; + let port: u16 = 128; + let ip_type: u8 = 4; + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + Subtensor::::set_serving_rate_limit(netuid, 0); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + ); + } + + #[benchmark] + fn burned_register() { + let netuid: u16 = 1; + let seed: u32 = 1; + let hotkey: T::AccountId = account("Alice", 0, seed); + let coldkey: T::AccountId = account("Test", 0, seed); + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + + let amount: u64 = 1_000_000; + 
Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), netuid, hotkey.clone()); + } + + #[benchmark] + fn root_register() { + let netuid: u16 = 1; + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + + let amount: u64 = 100_000_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } + + #[benchmark] + fn register_network() { + let seed: u32 = 1; + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("TestHotkey", 0, seed); + + Subtensor::::set_network_rate_limit(1); + let amount: u64 = 100_000_000_000_000u64.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } - Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())?; + #[benchmark] + fn commit_weights() { + let tempo: u16 = 1; + let netuid: u16 = 1; + let version_key: u64 = 0; + let uids: Vec = vec![0]; + let weight_values: Vec = vec![10]; + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 0, 2); + let start_nonce: u64 = 300_000; + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + weight_values.clone(), + version_key, + )); + + 
Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = Subtensor::::create_work_for_block_number( + netuid, + block_number, + start_nonce, + &hotkey, + ); + assert_ok!(Subtensor::::register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + block_number, + nonce, + work, + hotkey.clone(), + coldkey.clone() + )); + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + #[extrinsic_call] + _(RawOrigin::Signed(hotkey.clone()), netuid, commit_hash); + } - let uid = Subtensor::::get_uid_for_net_and_hotkey(netuid, &hotkey.clone()).unwrap(); - Subtensor::::set_validator_permit_for_uid(netuid, uid, true); - dests.push(id); - weights.push(id); + #[benchmark] + fn reveal_weights() { + let tempo: u16 = 0; + let netuid: u16 = 1; + let version_key: u64 = 0; + let uids: Vec = vec![0]; + let weight_values: Vec = vec![10]; + let salt: Vec = vec![8]; + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 1, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + + let _ = Subtensor::::register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + block_number, + nonce, + work.clone(), + hotkey.clone(), + coldkey.clone(), + ); + + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + let _ = 
Subtensor::::commit_weights( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + commit_hash, + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); } - }: set_weights(RawOrigin::Signed( signer.clone() ), netuid, dests, weights, version_key) + #[benchmark] + fn schedule_swap_coldkey() { + let old_coldkey: T::AccountId = account("old_cold", 0, 1); + let new_coldkey: T::AccountId = account("new_cold", 1, 2); + let amount: u64 = 100_000_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&old_coldkey, amount); + #[extrinsic_call] + _(RawOrigin::Signed(old_coldkey.clone()), new_coldkey.clone()); + } - benchmark_become_delegate { - // This is a whitelisted caller who can make transaction without weights. - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + #[benchmark] + fn sudo_set_tx_childkey_take_rate_limit() { + let new_rate_limit: u64 = 100; - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); + #[extrinsic_call] + _(RawOrigin::Root, new_rate_limit); + } - Subtensor::::set_network_registration_allowed( netuid, true); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn set_childkey_take() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Cold", 0, 1); + let hotkey: T::AccountId = account("Hot", 0, 1); + let take: u16 = 1000; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee: u64 = Subtensor::::get_burn_as_u64(netuid); + let deposit = reg_fee.saturating_mul(2); + 
Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + take, + ); + } - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); + #[benchmark] + fn swap_coldkey() { + let old_coldkey: T::AccountId = account("old_coldkey", 0, 0); + let new_coldkey: T::AccountId = account("new_coldkey", 0, 0); + let hotkey1: T::AccountId = account("hotkey1", 0, 0); + let netuid: u16 = 1; + let swap_cost: u64 = Subtensor::::get_key_swap_cost(); + let free_balance_old: u64 = 12345 + swap_cost; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey1); + let _ = Subtensor::::register( + RawOrigin::Signed(old_coldkey.clone()).into(), + netuid, + block_number, + nonce, + work.clone(), + hotkey1.clone(), + old_coldkey.clone(), + ); + + Subtensor::::add_balance_to_coldkey_account(&old_coldkey, free_balance_old); + let name: Vec = b"The fourth Coolest Identity".to_vec(); + let identity = ChainIdentity { + name, + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + Identities::::insert(&old_coldkey, identity); + + #[extrinsic_call] + _( + RawOrigin::Root, + old_coldkey.clone(), + new_coldkey.clone(), + swap_cost, + ); + } - let amount_to_be_staked = 1000000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + #[benchmark] + fn batch_reveal_weights() { + let tempo: u16 = 0; + let netuid: u16 = 1; + let num_commits: usize = 10; + + 
let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 0, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + Subtensor::::set_weights_set_rate_limit(netuid, 0); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work) = + Subtensor::::create_work_for_block_number(netuid, block_number, 3, &hotkey); + let origin = T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())); + assert_ok!(Subtensor::::register( + origin.clone(), + netuid, + block_number, + nonce, + work.clone(), + hotkey.clone(), + coldkey.clone() + )); + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + + let mut uids_list = Vec::new(); + let mut values_list = Vec::new(); + let mut salts_list = Vec::new(); + let mut version_keys = Vec::new(); + + for i in 0..num_commits { + let uids = vec![0u16]; + let values = vec![i as u16]; + let salts = vec![i as u16]; + let version_key_i: u64 = i as u64; + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + values.clone(), + salts.clone(), + version_key_i, + )); + + assert_ok!(Subtensor::::commit_weights( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + commit_hash + )); + + uids_list.push(uids); + values_list.push(values); + salts_list.push(salts); + version_keys.push(version_key_i); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuid, + uids_list, + values_list, + salts_list, + version_keys, + ); + } - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: become_delegate(RawOrigin::Signed( coldkey.clone() ), hotkey.clone()) + #[benchmark] + fn recycle_alpha() { + let netuid: u16 = 1; - benchmark_add_stake { - let caller: T::AccountId = 
whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_burn(netuid, 1); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true ); + let amount_to_be_staked: u64 = 1_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let alpha_amount: u64 = 1_000_000; + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + alpha_amount, + ); + + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + alpha_amount, + netuid, + ); + } - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn burn_alpha() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_burn(netuid, 1); - let amount: u64 = 60000000; - let amount_to_be_staked 
= 1000000000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + let amount_to_be_staked: u64 = 1_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let alpha_amount: u64 = 1_000_000; + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + alpha_amount, + ); + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + alpha_amount, + netuid, + ); + } - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: add_stake(RawOrigin::Signed( coldkey.clone() ), hotkey, netuid, amount) + #[benchmark] + fn start_call() { + let netuid: u16 = 1; + let coldkey: T::AccountId = account("Test", 0, 1); + let hotkey: T::AccountId = account("Alice", 0, 1); - benchmark_add_stake_aggregate { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + Subtensor::::init_new_network(netuid, 1); + SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true ); + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked: u64 = 1_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + SubnetOwner::::set(netuid, coldkey.clone()); + + 
assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + let current_block: u64 = Subtensor::::get_current_block_as_u64(); + let duration = ::DurationOfStartCall::get(); + let block: BlockNumberFor = (current_block + duration) + .try_into() + .ok() + .expect("can't convert to block number"); + frame_system::Pallet::::set_block_number(block); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), netuid); + } - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn adjust_senate() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let root: u16 = Subtensor::::get_root_netuid(); - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::init_new_network(root, 1); + Uids::::insert(root, &hotkey, 0u16); - let amount: u64 = 600000; - let amount_to_be_staked = 1000000000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: add_stake_aggregate(RawOrigin::Signed( coldkey.clone() ), hotkey, netuid, amount) + #[benchmark] + fn add_stake_limit() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; - benchmark_remove_stake_limit_aggregate{ - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; + Subtensor::::init_new_network(netuid, tempo); + 
SubtokenEnabled::::insert(netuid, true); + Subtensor::::set_burn(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_max_allowed_uids(netuid, 4096); + + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + let amount = 900_000_000_000; + let limit: u64 = 6_000_000_000; + let amount_to_be_staked = 440_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount); + + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey, + netuid, + amount_to_be_staked, + limit, + false, + ); + } - // Set our total stake to 1000 TAO - Subtensor::::increase_total_stake(1_000_000_000_000); + #[benchmark] + fn move_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let origin: T::AccountId = account("A", 0, 1); + let destination: T::AccountId = account("B", 0, 2); + let netuid: u16 = 1; + + SubtokenEnabled::::insert(netuid, true); + Subtensor::::init_new_network(netuid, 1); + + let burn_fee = Subtensor::::get_burn_as_u64(netuid); + let stake_tao: u64 = 1_000_000; + let deposit = burn_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + origin.clone() + )); + + SubnetTAO::::insert(netuid, deposit); + SubnetAlphaIn::::insert(netuid, deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + origin.clone(), + netuid, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_move: u64 = + 
Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&origin, &coldkey, netuid); + + Subtensor::::create_account_if_non_existent(&coldkey, &destination); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + origin.clone(), + destination.clone(), + netuid, + netuid, + alpha_to_move, + ); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed( netuid, true ); - SubtokenEnabled::::insert(netuid, true); + #[benchmark] + fn remove_stake_limit() { + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - Subtensor::::set_burn(netuid, 1); - - let limit: u64 = 1_000_000_000; - let tao_reserve = 150_000_000_000_u64; - let alpha_in = 100_000_000_000_u64; - SubnetTAO::::insert(netuid, tao_reserve); - SubnetAlphaIn::::insert(netuid, alpha_in); - - let wallet_bal = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + // Set our total stake to 1000 TAO + Subtensor::::increase_total_stake(1_000_000_000_000); - let u64_staked_amt = 100_000_000_000; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), u64_staked_amt); + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); - assert_ok!(Subtensor::::add_stake(RawOrigin::Signed( coldkey.clone() ).into() , hotkey.clone(), netuid, u64_staked_amt)); + Subtensor::::set_max_allowed_uids(netuid, 4096); + assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - let amount_unstaked: u64 = 30_000_000_000; - }: remove_stake_limit_aggregate(RawOrigin::Signed( 
coldkey.clone() ), hotkey.clone(), netuid, amount_unstaked, limit, false) + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + Subtensor::::set_burn(netuid, 1); - benchmark_remove_stake_aggregate{ - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; - - // Set our total stake to 1000 TAO - Subtensor::::increase_total_stake(1_000_000_000_000); + let limit: u64 = 1_000_000_000; + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid, tao_reserve); + SubnetAlphaIn::::insert(netuid, alpha_in); + + let wallet_bal = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); + + assert_ok!(Subtensor::::do_burned_registration( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hotkey.clone() + )); + + let u64_staked_amt = 100_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), u64_staked_amt); + + assert_ok!(Subtensor::::add_stake( + RawOrigin::Signed(coldkey.clone()).into(), + hotkey.clone(), + netuid, + u64_staked_amt + )); + + let amount_unstaked: u64 = 30_000_000_000; + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + netuid, + amount_unstaked, + limit, + false, + ); + } - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed( netuid, true ); - SubtokenEnabled::::insert(netuid, true); + #[benchmark] + fn swap_stake_limit() { + let coldkey: T::AccountId = whitelisted_caller::>(); + let hot: T::AccountId = account("A", 0, 1); + let netuid1: u16 = 1; + let netuid2: u16 = 2; + let allow: bool = true; + + SubtokenEnabled::::insert(netuid1, true); + Subtensor::::init_new_network(netuid1, 1); + SubtokenEnabled::::insert(netuid2, 
true); + Subtensor::::init_new_network(netuid2, 1); + + let tao_reserve = 150_000_000_000_u64; + let alpha_in = 100_000_000_000_u64; + SubnetTAO::::insert(netuid1, tao_reserve); + SubnetAlphaIn::::insert(netuid1, alpha_in); + SubnetTAO::::insert(netuid2, tao_reserve); + + Subtensor::::increase_total_stake(1_000_000_000_000); + + let amount = 900_000_000_000; + let limit_stake: u64 = 6_000_000_000; + let limit_swap: u64 = 1_000_000_000; + let amount_to_be_staked = 440_000_000_000; + let amount_swapped: u64 = 30_000_000_000; + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid1, + hot.clone() + )); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid2, + hot.clone() + )); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid1, + amount_to_be_staked, + limit_stake, + allow + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hot.clone(), + netuid1, + netuid2, + amount_swapped, + limit_swap, + allow, + ); + } - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn transfer_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let dest: T::AccountId = account("B", 0, 2); + let hot: T::AccountId = account("A", 0, 1); + let netuid: u16 = 1; + + SubtokenEnabled::::insert(netuid, true); + Subtensor::::init_new_network(netuid, 1); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + let stake_tao: u64 = 1_000_000; + let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid, + hot.clone() + )); + + SubnetTAO::::insert(netuid, deposit); + SubnetAlphaIn::::insert(netuid, 
deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_transfer: u64 = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hot, &coldkey, netuid); + + Subtensor::::create_account_if_non_existent(&dest, &hot); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + dest.clone(), + hot.clone(), + netuid, + netuid, + alpha_to_transfer, + ); + } - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - Subtensor::::set_burn(netuid, 1); - - let wallet_bal = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - - // Stake 10% of our current total staked TAO - let u64_staked_amt = 100_000_000_000; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), u64_staked_amt); - - assert_ok!( Subtensor::::add_stake(RawOrigin::Signed( coldkey.clone() ).into() , hotkey.clone(), netuid, u64_staked_amt)); - - let amount_unstaked: u64 = 600000; - }: remove_stake_aggregate(RawOrigin::Signed( coldkey.clone() ), hotkey.clone(), netuid, amount_unstaked) - - benchmark_add_stake_limit_aggregate { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - let seed : u32 = 1; - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true ); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - let amount = 
900_000_000_000; - let limit: u64 = 6_000_000_000; - let amount_to_be_staked = 440_000_000_000; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount); - - let tao_reserve = 150_000_000_000_u64; - let alpha_in = 100_000_000_000_u64; - SubnetTAO::::insert(netuid, tao_reserve); - SubnetAlphaIn::::insert(netuid, alpha_in); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: add_stake_limit_aggregate(RawOrigin::Signed( coldkey.clone() ), hotkey, netuid, amount_to_be_staked, limit, false) + #[benchmark] + fn swap_stake() { + let coldkey: T::AccountId = whitelisted_caller(); + let hot: T::AccountId = account("A", 0, 9); + let netuid1: u16 = 1; + let netuid2: u16 = 2; + + SubtokenEnabled::::insert(netuid1, true); + Subtensor::::init_new_network(netuid1, 1); + SubtokenEnabled::::insert(netuid2, true); + Subtensor::::init_new_network(netuid2, 1); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid1); + let stake_tao: u64 = 1_000_000; + let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + netuid1, + hot.clone() + )); + + SubnetTAO::::insert(netuid1, deposit); + SubnetAlphaIn::::insert(netuid1, deposit); + SubnetTAO::::insert(netuid2, deposit); + SubnetAlphaIn::::insert(netuid2, deposit); + TotalStake::::set(deposit); + + assert_ok!(Subtensor::::add_stake_limit( + RawOrigin::Signed(coldkey.clone()).into(), + hot.clone(), + netuid1, + stake_tao, + u64::MAX, + false + )); + + let alpha_to_swap: u64 = + Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet(&hot, &coldkey, netuid1); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hot.clone(), + netuid1, + netuid2, + alpha_to_swap, + ); + } + #[benchmark] + fn batch_commit_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: 
u16 = 1; + let count: usize = 3; + let mut netuids: Vec> = Vec::new(); + let mut hashes: Vec = Vec::new(); + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::set_validator_permit_for_uid(netuid, 0, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + for i in 0..count { + netuids.push(Compact(netuid)); + hashes.push(H256::repeat_byte(i as u8)); + } + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuids.clone(), + hashes.clone(), + ); + } - benchmark_serve_axon{ - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; - - let version: u32 = 2; - let ip: u128 = 1676056785; - let port: u16 = 128; - let ip_type: u8 = 4; - let protocol: u8 = 0; - let placeholder1: u8 = 0; - let placeholder2: u8 = 0; - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&caller.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(caller_origin.clone(), netuid, caller.clone())); - - Subtensor::::set_serving_rate_limit(netuid, 0); - - }: serve_axon(RawOrigin::Signed( caller.clone() ), netuid, version, ip, port, ip_type, protocol, placeholder1, placeholder2) - - benchmark_serve_prometheus { - let caller: T::AccountId = 
whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let modality: u16 = 0; + #[benchmark] + fn batch_set_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let version: u64 = 1; + let entries: Vec<(Compact, Compact)> = vec![(Compact(0u16), Compact(0u16))]; + let netuids: Vec> = vec![Compact(netuid)]; + let weights: Vec, Compact)>> = vec![entries.clone()]; + let keys: Vec> = vec![Compact(version)]; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), + netuids.clone(), + weights.clone(), + keys.clone(), + ); + } - let version: u32 = 2; - let ip: u128 = 1676056785; - let port: u16 = 128; - let ip_type: u8 = 4; + #[benchmark] + fn commit_crv3_weights() { + let hotkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let vec_commit: Vec = vec![0; MAX_CRV3_COMMIT_SIZE_BYTES as usize]; + let commit: BoundedVec<_, _> = vec_commit.try_into().unwrap(); + let round: u64 = 0; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(hotkey.clone()).into(), + netuid, + hotkey.clone() + )); + + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + + #[extrinsic_call] + _( + RawOrigin::Signed(hotkey.clone()), 
+ netuid, + commit.clone(), + round, + ); + } - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); + #[benchmark] + fn decrease_take() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let take: u16 = 100; - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&caller.clone(), amount_to_be_staked); + Delegates::::insert(&hotkey, 200u16); + Owner::::insert(&hotkey, &coldkey); - assert_ok!(Subtensor::::do_burned_registration(caller_origin.clone(), netuid, caller.clone())); - Subtensor::::set_serving_rate_limit(netuid, 0); + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take); + } + + #[benchmark] + fn increase_take() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 2); + let take: u16 = 150; + + Delegates::::insert(&hotkey, 100u16); + Owner::::insert(&hotkey, &coldkey); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take); + } + + #[benchmark] + fn register_network_with_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 1); + let identity: Option = None; + + Subtensor::::set_network_registration_allowed(1, true); + Subtensor::::set_network_rate_limit(1); + let amount: u64 = 9_999_999_999_999; + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + hotkey.clone(), + identity.clone(), + ); + } + + #[benchmark] + fn serve_axon_tls() { + let caller: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let version: u32 = 1; + let ip: u128 = 0xC0A8_0001; + let port: u16 = 30333; + let ip_type: u8 = 4; + let proto: u8 = 0; + let p1: u8 
= 0; + let p2: u8 = 0; + let cert: Vec = vec![]; + + Subtensor::::init_new_network(netuid, 1); + Subtensor::::set_network_registration_allowed(netuid, true); + SubtokenEnabled::::insert(netuid, true); + + let reg_fee = Subtensor::::get_burn_as_u64(netuid); + let deposit: u64 = reg_fee.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&caller, deposit); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(caller.clone()).into(), + netuid, + caller.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + netuid, + version, + ip, + port, + ip_type, + proto, + p1, + p2, + cert.clone(), + ); + } + + #[benchmark] + fn set_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("Alice", 0, 5); + let name = b"n".to_vec(); + let url = vec![]; + let repo = vec![]; + let img = vec![]; + let disc = vec![]; + let descr = vec![]; + let add = vec![]; + + Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); + Subtensor::::init_new_network(1, 1); + let deposit: u64 = 1_000_000_000u64.saturating_mul(2); + Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); + SubtokenEnabled::::insert(1, true); + + assert_ok!(Subtensor::::burned_register( + RawOrigin::Signed(coldkey.clone()).into(), + 1, + hotkey.clone() + )); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + name.clone(), + url.clone(), + repo.clone(), + img.clone(), + disc.clone(), + descr.clone(), + add.clone(), + ); + } + + #[benchmark] + fn set_subnet_identity() { + let coldkey: T::AccountId = whitelisted_caller(); + let netuid: u16 = 1; + let name = b"n".to_vec(); + let repo = vec![]; + let contact = vec![]; + let url = vec![]; + let disc = vec![]; + let descr = vec![]; + let add = vec![]; + + SubnetOwner::::insert(netuid, coldkey.clone()); + SubtokenEnabled::::insert(netuid, true); + + #[extrinsic_call] + _( + RawOrigin::Signed(coldkey.clone()), + netuid, + name.clone(), + repo.clone(), + 
contact.clone(), + url.clone(), + disc.clone(), + descr.clone(), + add.clone(), + ); + } + + #[benchmark] + fn set_tao_weights() { + let netuid: u16 = 1; + let hotkey: T::AccountId = account("A", 0, 6); + let dests = vec![0u16]; + let weights = vec![0u16]; + let version: u64 = 1; + + Subtensor::::init_new_network(netuid, 1); + + #[extrinsic_call] + _( + RawOrigin::None, + netuid, + hotkey.clone(), + dests.clone(), + weights.clone(), + version, + ); + } + + #[benchmark] + fn swap_hotkey() { + let coldkey: T::AccountId = whitelisted_caller(); + let old: T::AccountId = account("A", 0, 7); + let new: T::AccountId = account("B", 0, 8); + Owner::::insert(&old, &coldkey); + let cost: u64 = Subtensor::::get_key_swap_cost(); + Subtensor::::add_balance_to_coldkey_account(&coldkey, cost); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), old.clone(), new.clone()); + } + + #[benchmark] + fn try_associate_hotkey() { + let coldkey: T::AccountId = whitelisted_caller(); + let hot: T::AccountId = account("A", 0, 1); - }: serve_prometheus(RawOrigin::Signed( caller.clone() ), netuid, version, ip, port, ip_type) + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hot.clone()); + } + + #[benchmark] + fn unstake_all() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("A", 0, 14); + Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); + + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } + + #[benchmark] + fn unstake_all_alpha() { + let coldkey: T::AccountId = whitelisted_caller(); + let hotkey: T::AccountId = account("A", 0, 15); + Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); - /* - benchmark_sudo_register { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 0; - let modality: u16 = 0; - let stake: u64 = 10; - let balance: u64 = 1000000000; - - 
Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - - let seed : u32 = 1; - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); - - let amount_to_be_staked = balance.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - }: sudo_register(RawOrigin::>::Root, netuid, hotkey, coldkey, stake, balance) - */ - benchmark_burned_register { - let netuid: u16 = 1; - let seed : u32 = 1; - let hotkey: T::AccountId = account("Alice", 0, seed); - let coldkey: T::AccountId = account("Test", 0, seed); - let modality: u16 = 0; - let tempo: u16 = 1; - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_burn(netuid, 1); - - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - }: burned_register(RawOrigin::Signed( coldkey.clone() ), netuid, hotkey) - - - benchmark_root_register { - let netuid: u16 = 1; - let version_key: u64 = 1; - let tempo: u16 = 1; - let seed : u32 = 1; - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_burn(netuid, 1); - Subtensor::::set_network_registration_allowed( netuid, true); - - Subtensor::::set_max_allowed_uids( netuid, 4096 ); - assert_eq!(Subtensor::::get_max_allowed_uids(netuid), 4096); - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - let amount_to_be_staked = 100_000_000_000_000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - }: 
root_register(RawOrigin::Signed(coldkey), hotkey) - - benchmark_register_network { - let seed : u32 = 1; - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("TestHotkey", 0, seed); - - Subtensor::::set_network_rate_limit(1); - - let amount_to_be_staked = 100_000_000_000_000u64; - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked.saturating_mul(2)); - }: register_network(RawOrigin::Signed(coldkey), hotkey.clone()) - - // benchmark_dissolve_network { - // let seed : u32 = 1; - - // let coldkey: T::AccountId = account("Test", 0, seed); - // let hotkey: T::AccountId = account("TestHotkey", 0, seed); - - // Subtensor::::set_network_rate_limit(0); - - // let amount_to_be_staked = 100_000_000_000_000u64; - // Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - // assert_ok!(Subtensor::::register_network(RawOrigin::Root.into(), hotkey.clone())); - // }: dissolve_network(RawOrigin::Root, coldkey.clone(), 1) - - - // swap_hotkey { - // let seed: u32 = 1; - // let coldkey: T::AccountId = account("Alice", 0, seed); - // let old_hotkey: T::AccountId = account("Bob", 0, seed); - // let new_hotkey: T::AccountId = account("Charlie", 0, seed); - - // let netuid = 1u16; - // Subtensor::::init_new_network(netuid, 100); - // Subtensor::::set_min_burn(netuid, 1); - // Subtensor::::set_max_burn(netuid, 1); - // Subtensor::::set_target_registrations_per_interval(netuid, 256); - // Subtensor::::set_max_registrations_per_block(netuid, 256); - - // Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), 10_000_000_000u64); - // assert_ok!(Subtensor::::burned_register(RawOrigin::Signed(coldkey.clone()).into(), netuid, old_hotkey.clone())); - // assert_ok!(Subtensor::::become_delegate(RawOrigin::Signed(coldkey.clone()).into(), old_hotkey.clone())); - - // let max_uids = Subtensor::::get_max_allowed_uids(netuid) as u32; - // for i in 0..max_uids - 1 { - // let coldkey: 
T::AccountId = account("Axon", 0, i); - // let hotkey: T::AccountId = account("Hotkey", 0, i); - - // Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), 10_000_000_000u64); - // assert_ok!(Subtensor::::burned_register(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey)); - // assert_ok!(Subtensor::::add_stake(RawOrigin::Signed(coldkey).into(), old_hotkey.clone(), 1_000_000_000)); - // } - // }: _(RawOrigin::Signed(coldkey), old_hotkey, new_hotkey) - - commit_weights { - let tempo: u16 = 1; - let netuid: u16 = 1; - let version_key: u64 = 0; - let uids: Vec = vec![0]; - let weight_values: Vec = vec![10]; - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 0, 2); - let start_nonce = 300000; - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - weight_values.clone(), - version_key, - )); - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - start_nonce, - &hotkey, - ); - let result = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - block_number, - nonce, - work, - hotkey.clone(), - coldkey, - ); - assert_ok!(result); - Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); - -}: commit_weights(RawOrigin::Signed(hotkey.clone()), netuid, commit_hash) - -reveal_weights { - let tempo: u16 = 0; - let netuid: u16 = 1; - let version_key: u64 = 0; - let uids: Vec = vec![0]; - let weight_values: Vec = vec![10]; - let salt: Vec = vec![8]; - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 1, 2); - - Subtensor::::init_new_network(netuid, tempo); - 
Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); - - let _ = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - block_number, - nonce, - work.clone(), - hotkey.clone(), - coldkey.clone(), - ); - - Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - )); - let _ = Subtensor::::commit_weights(::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), netuid, commit_hash); - - }: reveal_weights(RawOrigin::Signed(hotkey.clone()), netuid, uids, weight_values, salt, version_key) - - schedule_swap_coldkey { - let old_coldkey: T::AccountId = account("old_cold", 0, 1); - let new_coldkey: T::AccountId = account("new_cold", 1, 2); - Subtensor::::add_balance_to_coldkey_account(&old_coldkey.clone(), 100_000_000_000_000u64); - }: schedule_swap_coldkey(RawOrigin::Signed(old_coldkey.clone()), new_coldkey.clone()) - -// schedule_dissolve_network { -// let coldkey: T::AccountId = account("coldkey", 0, 1); -// let netuid = 1; -// }: schedule_dissolve_network(RawOrigin::Signed(coldkey.clone()), netuid) - - benchmark_sudo_set_tx_childkey_take_rate_limit { - // We don't need to set up any initial state for this benchmark - // as it's a simple setter function that only requires root origin - let new_rate_limit: u64 = 100; -}: sudo_set_tx_childkey_take_rate_limit(RawOrigin::Root, new_rate_limit) - - benchmark_set_childkey_take { - // Setup - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - let coldkey: T::AccountId = account("Cold", 0, seed); 
- let hotkey: T::AccountId = account("Hot", 0, seed); - let take: u16 = 1000; // 10% in basis points - - // Initialize the network - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - - // Register the hotkey - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1_000_000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); -}: set_childkey_take(RawOrigin::Signed(coldkey), hotkey, netuid, take) - - swap_coldkey { - // Set up initial state - let old_coldkey: T::AccountId = account("old_coldkey", 0, 0); - let new_coldkey: T::AccountId = account("new_coldkey", 0, 0); - let hotkey1: T::AccountId = account("hotkey1", 0, 0); - let netuid = 1u16; - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let swap_cost = Subtensor::::get_key_swap_cost(); - let free_balance_old = 12345u64 + swap_cost; - let tempo: u16 = 1; - - // Setup initial state - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey1, - ); - - let _ = Subtensor::::register( - ::RuntimeOrigin::from(RawOrigin::Signed(old_coldkey.clone())), - netuid, - block_number, - nonce, - work.clone(), - hotkey1.clone(), - old_coldkey.clone(), - ); - - // Add balance to old coldkey - Subtensor::::add_balance_to_coldkey_account( - &old_coldkey, - stake_amount1 + stake_amount2 + free_balance_old, - ); - - // Insert an Identity - let name: Vec = b"The fourth Coolest Identity".to_vec(); - let identity: ChainIdentity = ChainIdentity { - name: name.clone(), - url: vec![], - image: vec![], - discord: vec![], - 
description: vec![], - additional: vec![], - }; - - Identities::::insert(&old_coldkey, identity); - - // Benchmark setup complete, now execute the extrinsic -}: swap_coldkey(RawOrigin::Root, old_coldkey.clone(), new_coldkey.clone(), swap_cost) - -batch_reveal_weights { - let tempo: u16 = 0; - let netuid: u16 = 1; - let num_commits: usize = 10; - - let hotkey: T::AccountId = account("hot", 0, 1); - let coldkey: T::AccountId = account("cold", 0, 2); - - Subtensor::::init_new_network(netuid, tempo); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); - Subtensor::::set_weights_set_rate_limit(netuid, 0); // Disable rate limiting for benchmarking - - let block_number: u64 = Subtensor::::get_current_block_as_u64(); - let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( - netuid, - block_number, - 3, - &hotkey, - ); - - let origin = T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())); - assert_ok!(Subtensor::::register( - origin.clone(), - netuid, - block_number, - nonce, - work.clone(), - hotkey.clone(), - coldkey.clone(), - )); - - let uid: u16 = 0; - - Subtensor::::set_validator_permit_for_uid(netuid, uid, true); - - let mut uids_list = Vec::new(); - let mut values_list = Vec::new(); - let mut salts_list = Vec::new(); - let mut version_keys = Vec::new(); - - for i in 0..num_commits { - let uids: Vec = vec![uid]; - let values: Vec = vec![i as u16]; - let salt: Vec = vec![i as u16]; - let version_key_i: u64 = i as u64; - - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key_i, - )); - - assert_ok!(Subtensor::::commit_weights( - T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), - netuid, - commit_hash, - )); - - uids_list.push(uids); - values_list.push(values); - salts_list.push(salt); - 
version_keys.push(version_key_i); - } -}: batch_reveal_weights( - RawOrigin::Signed(hotkey.clone()), - netuid, - uids_list, - values_list, - salts_list, - version_keys -) - -benchmark_recycle_alpha { - let caller: T::AccountId = whitelisted_caller::(); - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_burn(netuid, 1); - - let amount_to_be_staked = 1_000_000_000u64.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hotkey.clone() - )); - - let alpha_amount: u64 = 1_000_000; - SubnetAlphaOut::::insert(netuid, alpha_amount * 2); - - Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey, - &coldkey, - netuid, - alpha_amount - ); - - assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); -}: recycle_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) - -benchmark_burn_alpha { - let caller: T::AccountId = whitelisted_caller::(); - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_network_registration_allowed(netuid, true); - Subtensor::::set_burn(netuid, 1); - - let amount_to_be_staked = 1_000_000_000u64.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); - - assert_ok!(Subtensor::::do_burned_registration( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hotkey.clone() - )); - - let alpha_amount: u64 = 
1_000_000; - SubnetAlphaOut::::insert(netuid, alpha_amount * 2); - - Subtensor::::increase_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey, - &coldkey, - netuid, - alpha_amount - ); - - assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); - -}: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) - - -benchmark_start_call { - let caller: T::AccountId = whitelisted_caller::>(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - let netuid: u16 = 1; - let tempo: u16 = 1; - let seed: u32 = 1; - - // Set up coldkey and hotkey - let coldkey: T::AccountId = account("Test", 0, seed); - let hotkey: T::AccountId = account("Alice", 0, seed); - - // Initialize network - Subtensor::::init_new_network(netuid, tempo); - SubtokenEnabled::::insert(netuid, true); - Subtensor::::set_network_registration_allowed(netuid, true); - - // Register the neuron - Subtensor::::set_burn(netuid, 1); - let amount_to_be_staked = 1000000u32.into(); - Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); - SubnetOwner::::set(netuid, coldkey.clone()); - - assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); - assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); - let current_block: u64 = Subtensor::::get_current_block_as_u64(); - let duration = ::DurationOfStartCall::get(); - let block: BlockNumberFor = (current_block + duration).try_into().ok().expect("can't convert to block number"); - frame_system::Pallet::::set_block_number(block); - -}: start_call(RawOrigin::Signed(coldkey), netuid) - -benchmark_adjust_senate { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 1); - let root: u16 = Subtensor::::get_root_netuid(); - Subtensor::::init_new_network(root, 1); - Uids::::insert(root, &hotkey, 0u16); -}: 
adjust_senate(RawOrigin::Signed(coldkey), hotkey.clone()) - -benchmark_add_stake_limit { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey : T::AccountId = account("Alice", 0, 1); - let netuid : u16 = 1; - let amount : u64 = 1_000_000; - let limit : u64 = 1_000_000; - let allow : bool = true; - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - - let bond = Subtensor::::get_burn_as_u64(netuid); - let deposit = (amount + bond + DefaultStakingFee::::get()) * 10; - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hotkey.clone() - ) - ); - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - TotalStake::::set(deposit); - -}: add_stake_limit(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), netuid, amount, limit, allow) - -benchmark_move_stake { - let coldkey: T::AccountId = whitelisted_caller::>(); - let origin: T::AccountId = account("A", 0, 1); - let destination: T::AccountId = account("B", 0, 2); - let netuid: u16 = 1; - - SubtokenEnabled::::insert(netuid, true); - Subtensor::::init_new_network(netuid, 1); - let burn_fee = Subtensor::::get_burn_as_u64(netuid); - let stake_tao = 1_000_000; - let deposit = burn_fee.saturating_mul(2).saturating_add(stake_tao); - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - origin.clone() - ) - ); - - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - TotalStake::::set(deposit); - - assert_ok!( - Subtensor::::add_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - origin.clone(), - netuid, - stake_tao, - u64::MAX, - false - ) - ); - - let alpha_to_move: u64 = - 
Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet( - &origin, &coldkey, netuid - ); - - Subtensor::::create_account_if_non_existent(&coldkey, &destination); -}: move_stake(RawOrigin::Signed(coldkey.clone()),origin.clone(),destination.clone(),netuid,netuid,alpha_to_move) - -benchmark_remove_stake_limit { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 1); - let netuid: u16 = 1; - - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - - let bond = Subtensor::::get_burn_as_u64(netuid); - let fee = DefaultStakingFee::::get(); - let amount: u64 = 1_000_000; - let deposit = (amount + bond + fee).saturating_mul(10); - - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hotkey.clone(), - ) - ); - - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - SubnetAlphaOut::::insert(netuid, deposit); - TotalStake::::set(deposit); - - assert_ok!( - Subtensor::::add_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - hotkey.clone(), - netuid, - amount, - u64::MAX, - false, - ) - ); - - let alpha: u64 = Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey, &coldkey, netuid - ); - - assert_ok!( - Subtensor::::remove_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - hotkey.clone(), - netuid, - alpha, - u64::MAX, - true, - ) - ); -}: remove_stake_limit(RawOrigin::Signed(coldkey.clone()),hotkey.clone(),netuid,alpha,u64::MAX,true) - -benchmark_swap_stake_limit { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hot: T::AccountId = account("A", 0, 1); - let netuid: u16 = 1; - let allow: bool = true; - - SubtokenEnabled::::insert(netuid, true); - Subtensor::::init_new_network(netuid, 1); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - 
let stake_tao = 1_000_000; - let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hot.clone() - ) - ); - - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - TotalStake::::set(deposit); - - assert_ok!( - Subtensor::::add_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - hot.clone(), - netuid, - stake_tao, - u64::MAX, - allow - ) - ); - - let alpha_to_swap: u64 = - Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet( - &hot, &coldkey, netuid - ); -}: swap_stake_limit(RawOrigin::Signed(coldkey.clone()),hot.clone(),netuid,netuid,alpha_to_swap,u64::MAX,allow) - -benchmark_transfer_stake { - let coldkey: T::AccountId = whitelisted_caller::>(); - let dest: T::AccountId = account("B", 0, 2); - let hot: T::AccountId = account("A", 0, 1); - let netuid: u16 = 1; - - SubtokenEnabled::::insert(netuid, true); - Subtensor::::init_new_network(netuid, 1); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - let stake_tao = 1_000_000; - let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hot.clone() - ) - ); - - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - TotalStake::::set(deposit); - - assert_ok!( - Subtensor::::add_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - hot.clone(), - netuid, - stake_tao, - u64::MAX, - false - ) - ); - - let alpha_to_transfer: u64 = - Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet( - &hot, &coldkey, netuid - ); - - Subtensor::::create_account_if_non_existent(&dest, &hot); -}: 
transfer_stake(RawOrigin::Signed(coldkey.clone()),dest.clone(),hot.clone(),netuid,netuid,alpha_to_transfer) - -benchmark_swap_stake { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hot: T::AccountId = account("A", 0, 9); - let netuid: u16 = 1; - - SubtokenEnabled::::insert(netuid, true); - Subtensor::::init_new_network(netuid, 1); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - let stake_tao = 1_000_000; - let deposit = reg_fee.saturating_mul(2).saturating_add(stake_tao); - Subtensor::::add_balance_to_coldkey_account(&coldkey, deposit); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - netuid, - hot.clone() - ) - ); - - SubnetTAO::::insert(netuid, deposit); - SubnetAlphaIn::::insert(netuid, deposit); - TotalStake::::set(deposit); - - assert_ok!( - Subtensor::::add_stake_limit( - RawOrigin::Signed(coldkey.clone()).into(), - hot.clone(), - netuid, - stake_tao, - u64::MAX, - false - ) - ); - - let alpha_to_swap: u64 = - Subtensor::::get_stake_for_hotkey_and_coldkey_on_subnet( - &hot, &coldkey, netuid - ); -}: swap_stake(RawOrigin::Signed(coldkey.clone()),hot.clone(),netuid,netuid,alpha_to_swap) - -benchmark_batch_commit_weights { - let hotkey: T::AccountId = whitelisted_caller::>(); - let netuid: u16 = 1; - let count: usize = 3; - let mut netuids: Vec> = Vec::new(); - let mut hashes: Vec = Vec::new(); - - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(hotkey.clone()).into(), - netuid, - hotkey.clone() - ) - ); - - Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); - - for i in 0..count { - netuids.push( Compact(netuid) ); - 
hashes.push( H256::repeat_byte(i as u8) ); - } -}: batch_commit_weights(RawOrigin::Signed(hotkey.clone()),netuids, hashes) - -benchmark_batch_set_weights { - let hotkey: T::AccountId = whitelisted_caller::>(); - let netuid: u16 = 1; - let version: u64 = 1; - let entries: Vec<(Compact, Compact)> = vec![ - (Compact(0u16), Compact(0u16)) - ]; - let netuids: Vec> = - vec![ Compact(netuid) ]; - let weights: Vec, Compact)>> = - vec![ entries.clone() ]; - let keys: Vec> = - vec![ Compact(version) ]; - - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(hotkey.clone()).into(), - netuid, - hotkey.clone() - ) - ); -}: batch_set_weights(RawOrigin::Signed(hotkey.clone()),netuids, weights, keys) - -benchmark_commit_crv3_weights { - let hotkey: T::AccountId = whitelisted_caller::>(); - let netuid: u16 = 1; - let vec_commit: Vec = vec![0; MAX_CRV3_COMMIT_SIZE_BYTES as usize]; - let commit: BoundedVec<_, _> = - vec_commit.try_into().unwrap(); - let round: u64 = 0; - - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_pow_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - Subtensor::::add_balance_to_coldkey_account(&hotkey, reg_fee.saturating_mul(2)); - - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(hotkey.clone()).into(), - netuid, - hotkey.clone() - ) - ); - - Subtensor::::set_commit_reveal_weights_enabled(netuid, true); -}: commit_crv3_weights(RawOrigin::Signed(hotkey.clone()),netuid, commit, round) - -benchmark_decrease_take { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 1); - let take: u16 = 100; - 
- Delegates::::insert(&hotkey, 200u16); - Owner::::insert(&hotkey, &coldkey); -}: decrease_take(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take) - -benchmark_increase_take { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 2); - let take: u16 = 150; - - Delegates::::insert(&hotkey, 100u16); - Owner::::insert(&hotkey, &coldkey); -}: increase_take(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), take) - -benchmark_register_network_with_identity { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 1); - let identity: Option = None; - Subtensor::::set_network_registration_allowed( 1, true ); - Subtensor::::set_network_rate_limit(1); - Subtensor::::add_balance_to_coldkey_account(&coldkey, 9_999_999_999_999u64); -}: register_network_with_identity(RawOrigin::Signed(coldkey.clone()), hotkey.clone(), identity) - -benchmark_serve_axon_tls { - let caller: T::AccountId = whitelisted_caller::>(); - let netuid: u16 = 1; - let version: u32 = 1; - let ip: u128 = 0xC0A8_0001; - let port: u16 = 30333; - let ip_type: u8 = 4; - let proto: u8 = 0; - let p1: u8 = 0; - let p2: u8 = 0; - let cert: Vec = vec![]; - - Subtensor::::init_new_network(netuid, 1); - Subtensor::::set_network_registration_allowed(netuid, true); - SubtokenEnabled::::insert(netuid, true); - - let reg_fee = Subtensor::::get_burn_as_u64(netuid); - Subtensor::::add_balance_to_coldkey_account(&caller, reg_fee.saturating_mul(2)); - assert_ok!( - Subtensor::::burned_register( - RawOrigin::Signed(caller.clone()).into(), - netuid, - caller.clone() - ) - ); -}: serve_axon_tls(RawOrigin::Signed(caller.clone()),netuid,version,ip,port,ip_type,proto,p1,p2,cert) - -benchmark_set_identity { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("Alice", 0, 5); - let name = b"n".to_vec(); - let url = vec![]; - let repo = vec![]; - let img = vec![]; - let disc = vec![]; - let 
descr= vec![]; - let add = vec![]; - - Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); - Subtensor::::init_new_network(1, 1); - Subtensor::::add_balance_to_coldkey_account(&coldkey, 1_000_000_000u64.saturating_mul(2)); - SubtokenEnabled::::insert(1, true); - assert_ok!( Subtensor::::burned_register( - RawOrigin::Signed(coldkey.clone()).into(), - 1, hotkey.clone() - )); -}: set_identity(RawOrigin::Signed(coldkey.clone()),name, url, repo, img, disc, descr, add) - -benchmark_set_subnet_identity { - let coldkey: T::AccountId = whitelisted_caller::>(); - let netuid: u16 = 1; - let name = b"n".to_vec(); - let repo = vec![]; - let contact = vec![]; - let url = vec![]; - let disc = vec![]; - let descr = vec![]; - let add = vec![]; - - SubnetOwner::::insert(netuid, coldkey.clone()); - SubtokenEnabled::::insert(netuid, true); -}: set_subnet_identity(RawOrigin::Signed(coldkey.clone()), netuid, name, repo, contact, url, disc, descr, add) - -benchmark_set_tao_weights { - let netuid: u16 = 1; - let hotkey: T::AccountId = account("A", 0, 6); - let dests = vec![0u16]; - let weights = vec![0u16]; - let version: u64 = 1; - - Subtensor::::init_new_network(netuid, 1); -}: set_tao_weights(RawOrigin::None, netuid, hotkey.clone(), dests, weights, version) - -benchmark_swap_hotkey { - let coldkey: T::AccountId = whitelisted_caller::>(); - let old: T::AccountId = account("A", 0, 7); - let new: T::AccountId = account("B", 0, 8); - Owner::::insert(&old, &coldkey); - let cost = Subtensor::::get_key_swap_cost(); - Subtensor::::add_balance_to_coldkey_account(&coldkey, cost); -}: swap_hotkey(RawOrigin::Signed(coldkey.clone()), old.clone(), new.clone()) - -benchmark_try_associate_hotkey { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hot: T::AccountId = account("A", 0, 1); -}: try_associate_hotkey(RawOrigin::Signed(coldkey.clone()), hot.clone()) - -benchmark_unstake_all { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = 
account("A", 0, 14); - Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); -}: unstake_all(RawOrigin::Signed(coldkey.clone()), hotkey.clone()) - -benchmark_unstake_all_alpha { - let coldkey: T::AccountId = whitelisted_caller::>(); - let hotkey: T::AccountId = account("A", 0, 15); - Subtensor::::create_account_if_non_existent(&coldkey, &hotkey); -}: unstake_all_alpha(RawOrigin::Signed(coldkey.clone()), hotkey.clone()) + #[extrinsic_call] + _(RawOrigin::Signed(coldkey.clone()), hotkey.clone()); + } } diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index b4f23ced83..6a53cb135b 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -55,6 +55,11 @@ pub fn u16_proportion_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x).safe_div(I32F32::saturating_from_num(u16::MAX)) } +#[allow(dead_code)] +pub fn fixed_to_fixed_u16_proportion(x: I32F32) -> I32F32 { + x.safe_div(I32F32::saturating_from_num(u16::MAX)) +} + #[allow(dead_code)] pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { fixed_to_u16(x.saturating_mul(I32F32::saturating_from_num(u16::MAX))) @@ -80,11 +85,6 @@ pub fn vec_fixed64_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_u64).collect() } -#[allow(dead_code)] -pub fn vec_u16_proportions_to_fixed(vec: Vec) -> Vec { - vec.into_iter().map(u16_proportion_to_fixed).collect() -} - #[allow(dead_code)] pub fn vec_fixed_proportions_to_u16(vec: Vec) -> Vec { vec.into_iter().map(fixed_proportion_to_u16).collect() @@ -1207,6 +1207,48 @@ pub fn interpolate_sparse( result } +// Element-wise product of two vectors. 
+#[allow(dead_code)] +pub fn vec_mul(a: &[I32F32], b: &[I32F32]) -> Vec { + a.iter() + .zip(b.iter()) + .map(|(x, y)| x.checked_mul(*y).unwrap_or_default()) + .collect() +} + +// Element-wise product of matrix and vector +pub fn mat_vec_mul(matrix: &[Vec], vector: &[I32F32]) -> Vec> { + let Some(first_row) = matrix.first() else { + return vec![vec![]]; + }; + if first_row.is_empty() { + return vec![vec![]]; + } + matrix.iter().map(|row| vec_mul(row, vector)).collect() +} + +// Element-wise product of matrix and vector +#[allow(dead_code)] +pub fn mat_vec_mul_sparse( + matrix: &[Vec<(u16, I32F32)>], + vector: &[I32F32], +) -> Vec> { + let mut result: Vec> = vec![vec![]; matrix.len()]; + for (i, matrix_row) in matrix.iter().enumerate() { + for (j, value) in matrix_row.iter() { + if let Some(vector_value) = vector.get(*j as usize) { + let new_value = value.saturating_mul(*vector_value); + if new_value != I32F32::saturating_from_num(0.0) { + if let Some(result_row) = result.get_mut(i) { + result_row.push((*j, new_value)); + } + } + } + } + } + result +} + // Element-wise product of two matrices. #[allow(dead_code)] pub fn hadamard(mat1: &[Vec], mat2: &[Vec]) -> Vec> { @@ -1259,6 +1301,20 @@ pub fn hadamard_sparse( result } +/// Clamp the input value between high and low. +/// Note: assumes high > low +pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 { + // First, clamp the value to ensure it does not exceed the upper bound (high). + // If the value is greater than 'high', it will be set to 'high'. + // otherwise it remains unchanged. + value + .min(I32F32::from_num(high)) + // Next, clamp the value to ensure it does not go below the lower bound (_low). + // If the value (after the first clamping) is less than 'low', it will be set to 'low'. + // otherwise it remains unchanged. + .max(I32F32::from_num(low)) +} + // Return matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`. 
// `alpha` is the EMA coefficient, how much to add of the new observation, typically small, // higher alpha discounts older observations faster. @@ -1319,144 +1375,117 @@ pub fn mat_ema_sparse( result } -// Return sparse matrix only with elements >= threshold of an input sparse matrix. -#[allow(dead_code)] -pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec> { - w.iter() - .map(|row| { - row.iter() - .filter(|(_, weight)| *weight >= threshold) - .copied() - .collect() - }) - .collect() -} - /// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values. #[allow(dead_code)] -pub fn mat_ema_alpha_vec_sparse( +pub fn mat_ema_alpha_sparse( new: &[Vec<(u16, I32F32)>], old: &[Vec<(u16, I32F32)>], - alpha: &[I32F32], + alpha: &[Vec], ) -> Vec> { - // Ensure the new and old matrices have the same number of rows. + // Ensure dimensions match. assert!(new.len() == old.len()); - let n = new.len(); // Assume square matrix, rows=cols + assert!(new.len() == alpha.len()); + + // The output vector of rows. + let mut result: Vec> = Vec::with_capacity(new.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - let mut result: Vec> = vec![vec![]; n]; + let one = I32F32::saturating_from_num(1.0); // Iterate over each row of the matrices. - for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() { + for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { // Initialize a row of zeros for the result matrix. - let mut row: Vec = vec![zero; n]; + let mut decayed_values: Vec = vec![zero; alpha_row.len()]; - // Process the new matrix values. - for (j, value) in new_row.iter() { - // Retrieve the alpha value for the current column. - let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero); - // Compute the EMA component for the new value using saturating multiplication. 
- if let Some(row_val) = row.get_mut(*j as usize) { - *row_val = alpha_val.saturating_mul(*value); - } - log::trace!( - "new[{}][{}] * alpha[{}] = {} * {} = {}", - i, - j, - j, - value, - alpha_val, - row.get(*j as usize).unwrap_or(&zero) - ); - } + let mut result_row: Vec<(u16, I32F32)> = Vec::new(); // Process the old matrix values. - for (j, value) in old_row.iter() { - // Retrieve the alpha value for the current column. - let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero); - // Calculate the complement of the alpha value using saturating subtraction. - let one_minus_alpha: I32F32 = - I32F32::saturating_from_num(1.0).saturating_sub(alpha_val); - // Compute the EMA component for the old value and add it to the row using saturating operations. - if let Some(row_val) = row.get_mut(*j as usize) { - *row_val = row_val.saturating_add(one_minus_alpha.saturating_mul(*value)); + for (j, old_val) in old_row.iter() { + if let (Some(alpha_val), Some(decayed_val)) = ( + alpha_row.get(*j as usize), + decayed_values.get_mut(*j as usize), + ) { + // Calculate the complement of the alpha value + let one_minus_alpha = one.saturating_sub(*alpha_val); + // Bonds_decayed = Bonds * (1 - alpha) + *decayed_val = one_minus_alpha.saturating_mul(*old_val); } - log::trace!( - "old[{}][{}] * (1 - alpha[{}]) = {} * {} = {}", - i, - j, - j, - value, - one_minus_alpha, - one_minus_alpha.saturating_mul(*value) - ); } - // Collect the non-zero values into the result matrix. - for (j, value) in row.iter().enumerate() { - if *value > zero { - if let Some(result_row) = result.get_mut(i) { - result_row.push((j as u16, *value)); - log::trace!("result[{}][{}] = {}", i, j, value); + // Process the new matrix values. 
+ for (j, new_val) in new_row.iter() { + if let (Some(alpha_val), Some(decayed_val)) = + (alpha_row.get(*j as usize), decayed_values.get(*j as usize)) + { + // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap + // Validators allocate their purchase across miners based on weights + let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); + let result_val = decayed_val.saturating_add(purchase_increment).min(one); + + if result_val > zero { + result_row.push((*j, result_val)); } } } + result.push(result_row); } // Return the computed EMA sparse matrix. result } -/// Return matrix exponential moving average: `alpha_j * a_ij + one_minus_alpha_j * b_ij`. -/// `alpha_` is the EMA coefficient passed as a vector per column. +/// Calculates the exponential moving average (EMA) for a dense matrix using dynamic alpha values. #[allow(dead_code)] -pub fn mat_ema_alpha_vec( - new: &[Vec], - old: &[Vec], - alpha: &[I32F32], +pub fn mat_ema_alpha( + new: &[Vec], // Weights + old: &[Vec], // Bonds + alpha: &[Vec], ) -> Vec> { // Check if the new matrix is empty or its first row is empty. if new.is_empty() || new.first().is_none_or(|row| row.is_empty()) { return vec![vec![]; 1]; } - // Ensure the dimensions of the new and old matrices match. + // Ensure the dimensions of the new, old and alpha matrices match. assert!(new.len() == old.len()); - assert!(new.first().map_or(0, |row| row.len()) == alpha.len()); + assert!(new.len() == alpha.len()); // Initialize the result matrix with zeros, having the same dimensions as the new matrix. - let mut result: Vec> = - vec![ - vec![I32F32::saturating_from_num(0.0); new.first().map_or(0, |row| row.len())]; - new.len() - ]; + let zero: I32F32 = I32F32::saturating_from_num(0.0); + let one = I32F32::saturating_from_num(1.0); + + let mut result: Vec> = Vec::with_capacity(new.len()); // Iterate over each row of the matrices. 
- for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() { - // Ensure the current row of the new and old matrices have the same length. + for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { assert!(new_row.len() == old_row.len()); + assert!(new_row.len() == alpha_row.len()); + let mut result_row: Vec = Vec::new(); // Iterate over each column of the current row. - for (j, &alpha_val) in alpha.iter().enumerate().take(new_row.len()) { - // Calculate the complement of the alpha value using saturating subtraction. - let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha_val); - + for j in 0..new_row.len() { // Compute the EMA for the current element using saturating operations. - if let (Some(new_val), Some(old_val), Some(result_val)) = ( - new_row.get(j), - old_row.get(j), - result.get_mut(i).and_then(|row| row.get_mut(j)), - ) { - *result_val = alpha_val - .saturating_mul(*new_val) - .saturating_add(one_minus_alpha.saturating_mul(*old_val)); + if let (Some(new_val), Some(old_val), Some(alpha_val)) = + (new_row.get(j), old_row.get(j), alpha_row.get(j)) + { + // Calculate the complement of the alpha value + let one_minus_alpha = one.saturating_sub(*alpha_val); + + // Bonds_decayed = Bonds * (1 - alpha) + let decayed_val = one_minus_alpha.saturating_mul(*old_val); + + // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap + // Validators allocate their purchase across miners based on weights + let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); + let result_val = decayed_val.saturating_add(purchase_increment).min(one); + result_row.push(result_val); } } + result.push(result_row); } // Return the computed EMA matrix. result } - /// Return the quantile of a vector of I32F32 values. pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 { // Clone the input data to avoid modifying the original vector. 
diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 62027f9636..1b87388b85 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -12,7 +12,7 @@ impl Pallet { pub fn epoch_dense(netuid: u16, rao_emission: u64) -> Vec<(T::AccountId, u64, u64)> { // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); - log::trace!("n:\n{:?}\n", n); + log::trace!("n: {:?}", n); // ====================== // == Active & updated == @@ -20,7 +20,7 @@ impl Pallet { // Get current block. let current_block: u64 = Self::get_current_block_as_u64(); - log::trace!("current_block:\n{:?}\n", current_block); + log::trace!("current_block: {:?}", current_block); // Get tempo. let tempo: u64 = Self::get_tempo(netuid).into(); @@ -28,25 +28,25 @@ impl Pallet { // Get activity cutoff. let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; - log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff); + log::trace!("activity_cutoff: {:?}", activity_cutoff); // Last update vector. let last_update: Vec = Self::get_last_update(netuid); - log::trace!("Last update:\n{:?}\n", &last_update); + log::trace!("Last update: {:?}", &last_update); // Inactive mask. let inactive: Vec = last_update .iter() .map(|updated| updated.saturating_add(activity_cutoff) < current_block) .collect(); - log::trace!("Inactive:\n{:?}\n", inactive.clone()); + log::trace!("Inactive: {:?}", inactive.clone()); // Logical negation of inactive. let active: Vec = inactive.iter().map(|&b| !b).collect(); // Block at registration vector (block when each neuron was most recently registered). let block_at_registration: Vec = Self::get_block_at_registration(netuid); - log::trace!("Block at registration:\n{:?}\n", &block_at_registration); + log::trace!("Block at registration: {:?}", &block_at_registration); // Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered. 
let outdated: Vec> = last_update @@ -58,7 +58,7 @@ impl Pallet { .collect() }) .collect(); - log::trace!("Outdated:\n{:?}\n", &outdated); + log::trace!("Outdated: {:?}", &outdated); // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. // Mask if: the last tempo block happened *before* the registration block @@ -68,7 +68,7 @@ impl Pallet { .iter() .map(|registered| last_tempo <= *registered) .collect(); - log::trace!("Recently registered:\n{:?}\n", &recently_registered); + log::trace!("Recently registered: {:?}", &recently_registered); // =========== // == Stake == @@ -84,7 +84,7 @@ impl Pallet { Self::get_stake_weights_for_network(netuid); inplace_normalize_64(&mut total_stake); let stake: Vec = vec_fixed64_to_fixed32(total_stake); - log::trace!("S:\n{:?}\n", &stake); + log::trace!("S: {:?}", &stake); // ======================= // == Validator permits == @@ -119,7 +119,7 @@ impl Pallet { // Normalize active stake. inplace_normalize(&mut active_stake); - log::trace!("S:\n{:?}\n", &active_stake); + log::trace!("S: {:?}", &active_stake); // ============= // == Weights == @@ -130,7 +130,7 @@ impl Pallet { // Access network weights row unnormalized. let mut weights: Vec> = Self::get_weights(netuid); - log::trace!("W:\n{:?}\n", &weights); + log::trace!("W: {:?}", &weights); // Mask weights that are not from permitted validators. inplace_mask_rows(&validator_forbids, &mut weights); @@ -144,15 +144,15 @@ impl Pallet { } inplace_mask_diag(&mut weights); - log::trace!("W (permit+diag):\n{:?}\n", &weights); + log::trace!("W (permit+diag): {:?}", &weights); // Mask outdated weights: remove weights referring to deregistered neurons. inplace_mask_matrix(&outdated, &mut weights); - log::trace!("W (permit+diag+outdate):\n{:?}\n", &weights); + log::trace!("W (permit+diag+outdate): {:?}", &weights); // Normalize remaining weights. 
inplace_row_normalize(&mut weights); - log::trace!("W (mask+norm):\n{:?}\n", &weights); + log::trace!("W (mask+norm): {:?}", &weights); // ================================ // == Consensus, Validator Trust == @@ -183,7 +183,7 @@ impl Pallet { inplace_normalize(&mut ranks); let incentive: Vec = ranks.clone(); - log::trace!("I:\n{:?}\n", &incentive); + log::trace!("I: {:?}", &incentive); // ========================= // == Bonds and Dividends == @@ -197,26 +197,61 @@ impl Pallet { let weights_for_bonds: Vec> = interpolate(&weights, &clipped_weights, bonds_penalty); - // Access network bonds. - let mut bonds: Vec> = Self::get_bonds(netuid); - // Remove bonds referring to neurons that have registered since last tempo. - inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds - inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 - log::trace!("B:\n{:?}\n", &bonds); - - // Compute bonds delta column normalized. - let mut bonds_delta: Vec> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S - inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1 - log::trace!("ΔB:\n{:?}\n", &bonds_delta); - // Compute the Exponential Moving Average (EMA) of bonds. - let mut ema_bonds = Self::compute_ema_bonds(netuid, consensus.clone(), bonds_delta, bonds); - inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 - log::trace!("emaB:\n{:?}\n", &ema_bonds); - - // Compute dividends: d_i = SUM(j) b_ij * inc_j - let mut dividends: Vec = matmul_transpose(&ema_bonds, &incentive); - inplace_normalize(&mut dividends); - log::trace!("D:\n{:?}\n", ÷nds); + let mut dividends: Vec; + let mut ema_bonds: Vec>; + if Yuma3On::::get(netuid) { + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid); + inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds + log::trace!("B: {:?}", &bonds); + + // Compute the Exponential Moving Average (EMA) of bonds. 
+ ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); + log::trace!("emaB: {:?}", &ema_bonds); + + // Normalize EMA bonds. + let mut ema_bonds_norm = ema_bonds.clone(); + inplace_col_normalize(&mut ema_bonds_norm); + log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // # === Dividend Calculation=== + let total_bonds_per_validator: Vec = + row_sum(&mat_vec_mul(&ema_bonds_norm, &incentive)); + log::trace!( + "total_bonds_per_validator: {:?}", + &total_bonds_per_validator + ); + + dividends = vec_mul(&total_bonds_per_validator, &active_stake); + inplace_normalize(&mut dividends); + log::trace!("D: {:?}", ÷nds); + } else { + // original Yuma - liquid alpha disabled + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds(netuid); + // Remove bonds referring to neurons that have registered since last tempo. + inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds + inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 + log::trace!("B: {:?}", &bonds); + + // Compute bonds delta column normalized. + let mut bonds_delta: Vec> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S + inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1 + log::trace!("ΔB: {:?}", &bonds_delta); + + // Compute the Exponential Moving Average (EMA) of bonds. + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); + inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 + log::trace!("emaB: {:?}", &ema_bonds); + + // Compute dividends: d_i = SUM(j) b_ij * inc_j + dividends = matmul_transpose(&ema_bonds, &incentive); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + + // Column max-upscale EMA bonds for storage: max_i w_ij = 1. 
+ inplace_col_max_upscale(&mut ema_bonds); + } // ================================= // == Emission and Pruning scores == @@ -341,8 +376,6 @@ impl Pallet { ValidatorTrust::::insert(netuid, cloned_validator_trust); ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - inplace_col_max_upscale(&mut ema_bonds); new_validator_permits .iter() .zip(validator_permits) @@ -476,7 +509,7 @@ impl Pallet { // Normalize active stake. inplace_normalize(&mut active_stake); - log::debug!("Active Stake:\n{:?}\n", &active_stake); + log::trace!("Active Stake: {:?}", &active_stake); // ============= // == Weights == @@ -545,7 +578,7 @@ impl Pallet { // Compute server trust: ratio of rank after vs. rank before. let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) - log::trace!("T: {:?}", &trust); + log::trace!("Trust: {:?}", &trust); inplace_normalize(&mut ranks); // range: I32F32(0, 1) let incentive: Vec = ranks.clone(); @@ -563,47 +596,92 @@ impl Pallet { let weights_for_bonds: Vec> = interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); - // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_sparse(netuid); - log::trace!("B: {:?}", &bonds); + let mut dividends: Vec; + let mut ema_bonds: Vec>; + if Yuma3On::::get(netuid) { + // Access network bonds. + let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + log::trace!("Bonds: {:?}", &bonds); + + // Remove bonds referring to neurons that have registered since last tempo. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( + &bonds, + last_tempo, + &block_at_registration, + &|last_tempo, registered| last_tempo <= registered, + ); + log::trace!("Bonds: (mask) {:?}", &bonds); + + // Compute the Exponential Moving Average (EMA) of bonds. 
+ log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); + ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + log::trace!("emaB: {:?}", &ema_bonds); + + // Normalize EMA bonds. + let mut ema_bonds_norm = ema_bonds.clone(); + inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 + log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // # === Dividend Calculation=== + let total_bonds_per_validator: Vec = + row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); + log::trace!( + "total_bonds_per_validator: {:?}", + &total_bonds_per_validator + ); - // Remove bonds referring to neurons that have registered since last tempo. - // Mask if: the last tempo block happened *before* the registration block - // ==> last_tempo <= registered - let last_tempo: u64 = current_block.saturating_sub(tempo); - bonds = scalar_vec_mask_sparse_matrix( - &bonds, - last_tempo, - &block_at_registration, - &|last_tempo, registered| last_tempo <= registered, - ); - log::trace!("B (outdatedmask): {:?}", &bonds); - - // Normalize remaining bonds: sum_i b_ij = 1. - inplace_col_normalize_sparse(&mut bonds, n); - log::trace!("B (mask+norm): {:?}", &bonds); - - // Compute bonds delta column normalized. - let mut bonds_delta: Vec> = - row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) - log::trace!("ΔB: {:?}", &bonds_delta); - - // Normalize bonds delta. - inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 - log::trace!("ΔB (norm): {:?}", &bonds_delta); - - // Compute the Exponential Moving Average (EMA) of bonds. - let mut ema_bonds = - Self::compute_ema_bonds_sparse(netuid, consensus.clone(), bonds_delta, bonds); - // Normalize EMA bonds. - inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 - log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); - - // Compute dividends: d_i = SUM(j) b_ij * inc_j. 
- // range: I32F32(0, 1) - let mut dividends: Vec = matmul_transpose_sparse(&ema_bonds, &incentive); - inplace_normalize(&mut dividends); - log::trace!("Dividends: {:?}", ÷nds); + dividends = vec_mul(&total_bonds_per_validator, &active_stake); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + } else { + // original Yuma - liquid alpha disabled + // Access network bonds. + let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + log::trace!("B: {:?}", &bonds); + + // Remove bonds referring to neurons that have registered since last tempo. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( + &bonds, + last_tempo, + &block_at_registration, + &|last_tempo, registered| last_tempo <= registered, + ); + log::trace!("B (outdatedmask): {:?}", &bonds); + + // Normalize remaining bonds: sum_i b_ij = 1. + inplace_col_normalize_sparse(&mut bonds, n); + log::trace!("B (mask+norm): {:?}", &bonds); + + // Compute bonds delta column normalized. + let mut bonds_delta: Vec> = + row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) + log::trace!("ΔB: {:?}", &bonds_delta); + + // Normalize bonds delta. + inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 + log::trace!("ΔB (norm): {:?}", &bonds_delta); + + // Compute the Exponential Moving Average (EMA) of bonds. + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + // Normalize EMA bonds. + inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 + log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); + + // Compute dividends: d_i = SUM(j) b_ij * inc_j. 
+ // range: I32F32(0, 1) + dividends = matmul_transpose_sparse(&ema_bonds, &incentive); + inplace_normalize(&mut dividends); + log::trace!("Dividends: {:?}", ÷nds); + + // Column max-upscale EMA bonds for storage: max_i w_ij = 1. + inplace_col_max_upscale_sparse(&mut ema_bonds, n); + } // ================================= // == Emission and Pruning scores == @@ -734,8 +812,6 @@ impl Pallet { ValidatorTrust::::insert(netuid, cloned_validator_trust); ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - inplace_col_max_upscale_sparse(&mut ema_bonds, n); new_validator_permits .iter() .zip(validator_permits) @@ -848,7 +924,7 @@ impl Pallet { bonds .get_mut(uid_i as usize) .expect("uid_i is filtered to be less than n; qed") - .push((uid_j, I32F32::saturating_from_num(bonds_ij))); + .push((uid_j, u16_to_fixed(bonds_ij))); } } bonds @@ -868,186 +944,30 @@ impl Pallet { .expect("uid_i has been filtered to be less than n; qed") .get_mut(uid_j as usize) .expect("uid_j has been filtered to be less than n; qed") = - I32F32::saturating_from_num(bonds_ij); + u16_to_fixed(bonds_ij); } } bonds } - /// Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - /// - /// # Args: - /// * `alpha_high` - The high alpha value. - /// * `alpha_low` - The low alpha value. - /// * `consensus_high` - The high consensus value. - /// * `consensus_low` - The low consensus value. - /// - /// # Returns: - /// A tuple containing the slope 'a' and intercept 'b' for the logistic function. 
- pub fn calculate_logistic_params( - alpha_high: I32F32, - alpha_low: I32F32, - consensus_high: I32F32, - consensus_low: I32F32, - ) -> (I32F32, I32F32) { - log::trace!("alpha_high: {:?}", alpha_high); - log::trace!("alpha_low: {:?}", alpha_low); - log::trace!("consensus_high: {:?}", consensus_high); - log::trace!("consensus_low: {:?}", consensus_low); - // Check for division by zero - // extra caution to ensure we never divide by zero - if consensus_high <= consensus_low || alpha_low == 0 || alpha_high == 0 { - // Return 0 for both 'a' and 'b' when consensus values are equal - return ( - I32F32::saturating_from_num(0.0), - I32F32::saturating_from_num(0.0), - ); - } - - // Calculate the slope 'a' of the logistic function. - // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) - let a = (safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_high)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ) - .saturating_sub(safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_low)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ))) - .safe_div(consensus_low.saturating_sub(consensus_high)); - log::trace!("a: {:?}", a); - - // Calculate the intercept 'b' of the logistic function. - // b = ln((1 / alpha_low - 1)) + a * consensus_low - let b = safe_ln( - (I32F32::saturating_from_num(1.0).safe_div(alpha_low)) - .saturating_sub(I32F32::saturating_from_num(1.0)), - ) - .saturating_add(a.saturating_mul(consensus_low)); - log::trace!("b: {:?}", b); - - // Return the calculated slope 'a' and intercept 'b'. - (a, b) - } - - /// Compute the alpha values using the logistic function parameters 'a' and 'b'. - /// - /// # Args: - /// * `consensus` - A vector of consensus values. - /// * `a` - The slope of the logistic function. - /// * `b` - The intercept of the logistic function. - /// - /// # Returns: - /// A vector of computed alpha values. 
- pub fn compute_alpha_values(consensus: &[I32F32], a: I32F32, b: I32F32) -> Vec { - // Compute the alpha values for each consensus value. - let alpha: Vec = consensus - .iter() - .map(|c| { - // Calculate the exponent value for the logistic function. - // exp_val = exp(b - a * c) - let exp_val = safe_exp(b.saturating_sub(a.saturating_mul(*c))); - - // Compute the alpha value using the logistic function formula. - // alpha = 1 / (1 + exp_val) - I32F32::saturating_from_num(1.0) - .safe_div(I32F32::saturating_from_num(1.0).saturating_add(exp_val)) - }) - .collect(); - - // Log the computed alpha values for debugging purposes. - log::trace!("alpha: {:?}", alpha); - - // Return the computed alpha values. - alpha - } - - /// Clamp the alpha values between alpha_high and alpha_low. - /// - /// # Args: - /// * `alpha` - A vector of alpha values. - /// * `alpha_high` - The high alpha value. - /// * `alpha_low` - The low alpha value. - /// - /// # Returns: - /// A vector of clamped alpha values. - pub fn clamp_alpha_values( - alpha: Vec, - alpha_high: I32F32, - alpha_low: I32F32, - ) -> Vec { - let clamped_alpha: Vec = alpha - .iter() - .map(|a| { - // First, clamp the value to ensure it does not exceed the upper bound (alpha_high). - // If 'a' is greater than 'alpha_high', it will be set to 'alpha_high'. - // If 'a' is less than or equal to 'alpha_high', it remains unchanged. - let clamped_a = a - .min(&alpha_high) - // Next, clamp the value to ensure it does not go below the lower bound (alpha_low). - // If the value (after the first clamping) is less than 'alpha_low', it will be set to 'alpha_low'. - // If the value is greater than or equal to 'alpha_low', it remains unchanged. - .max(&alpha_low); - // Return the clamped value. - *clamped_a - }) - .collect(); - log::trace!("alpha_clamped: {:?}", clamped_alpha); - clamped_alpha - } - - /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values for a sparse matrix. 
- /// - /// # Args: - /// * `bonds_delta` - A vector of bond deltas. - /// * `bonds` - A vector of bonds. - /// * `alpha` - A vector of clamped alpha values. - /// - /// # Returns: - /// A vector of EMA bonds. - pub fn compute_ema_bonds_with_liquid_alpha_sparse( - bonds_delta: &[Vec<(u16, I32F32)>], - bonds: &[Vec<(u16, I32F32)>], - alpha: Vec, - ) -> Vec> { - // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. - let ema_bonds = mat_ema_alpha_vec_sparse(bonds_delta, bonds, &alpha); - - // Log the computed EMA bonds for debugging purposes. - log::trace!( - "Exponential Moving Average Bonds Liquid Alpha: {:?}", - ema_bonds - ); - - // Return the computed EMA bonds. - ema_bonds + pub fn get_bonds_fixed_proportion(netuid: u16) -> Vec> { + let mut bonds = Self::get_bonds(netuid); + bonds.iter_mut().for_each(|bonds_row| { + bonds_row + .iter_mut() + .for_each(|bond| *bond = fixed_to_fixed_u16_proportion(*bond)); + }); + bonds } - /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. - /// - /// # Args: - /// * `bonds_delta` - A vector of bond deltas. - /// * `bonds` - A vector of bonds. - /// * `alpha` - A vector of clamped alpha values. - /// - /// # Returns: - /// A vector of EMA bonds. - pub fn compute_ema_bonds_with_liquid_alpha( - bonds_delta: &[Vec], - bonds: &[Vec], - alpha: Vec, - ) -> Vec> { - // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. - let ema_bonds = mat_ema_alpha_vec(bonds_delta, bonds, &alpha); - - // Log the computed EMA bonds for debugging purposes. - log::trace!( - "Exponential Moving Average Bonds Liquid Alpha: {:?}", - ema_bonds - ); - - // Return the computed EMA bonds. 
- ema_bonds + pub fn get_bonds_sparse_fixed_proportion(netuid: u16) -> Vec> { + let mut bonds = Self::get_bonds_sparse(netuid); + bonds.iter_mut().for_each(|bonds_row| { + bonds_row + .iter_mut() + .for_each(|(_, bond)| *bond = fixed_to_fixed_u16_proportion(*bond)); + }); + bonds } /// Compute the Exponential Moving Average (EMA) of bonds using a normal alpha value for a sparse matrix. @@ -1118,93 +1038,63 @@ impl Pallet { ema_bonds } - /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting for a sparse matrix. + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting /// /// # Args: /// * `netuid` - The network ID. - /// * `consensus` - A vector of consensus values. - /// * `bonds_delta` - A vector of bond deltas. + /// * `weights` - A vector of weights. /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// * `active_stake` - A vector of active stake values. /// /// # Returns: /// A vector of EMA bonds. - pub fn compute_ema_bonds_sparse( + pub fn compute_bonds( netuid: u16, - consensus: Vec, - bonds_delta: Vec>, - bonds: Vec>, - ) -> Vec> { + weights: &[Vec], // weights_for_bonds + bonds: &[Vec], + consensus: &[I32F32], + ) -> Vec> { // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. - // This way we avoid the quantil function panic. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() && consensus .iter() .any(|&c| c != I32F32::saturating_from_num(0)) { - // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. - let consensus_high = quantile(&consensus, 0.75); - let consensus_low = quantile(&consensus, 0.25); - // Further check if the high and low consensus values meet the required conditions. 
- if (consensus_high > consensus_low) || consensus_high != 0 || consensus_low < 0 { - // if (consensus_high > consensus_low) || consensus_high != 0) || consensus_low != 0 { - // if (consensus_high > consensus_low) || consensus_low != 0 { - log::trace!("Using Liquid Alpha"); - - // Get the high and low alpha values for the network. - let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - log::trace!("alpha_low: {:?} alpha_high: {:?}", alpha_low, alpha_high); - - // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - let (a, b) = Self::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); + // Liquid Alpha is enabled, compute the liquid alphas matrix. + let alphas: Vec> = + Self::compute_liquid_alpha_values(netuid, weights, bonds, consensus); + log::trace!("alphas: {:?}", &alphas); - // Compute the alpha values using the logistic function parameters. - let alpha = Self::compute_alpha_values(&consensus, a, b); - - // Clamp the alpha values between alpha_high and alpha_low. - let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); - - // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. - Self::compute_ema_bonds_with_liquid_alpha_sparse( - &bonds_delta, - &bonds, - clamped_alpha, - ) - } else { - log::trace!("Using Bonds Moving Average"); - - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) - } + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. + mat_ema_alpha(weights, bonds, &alphas) } else { - log::trace!("Using Bonds Moving Average"); + // Liquid Alpha is disabled, compute the liquid alpha value. + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); - // Compute the EMA of bonds using a normal alpha value. 
- Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + mat_ema(weights, bonds, alpha) } } - /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting. + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting for a sparse matrix. /// /// # Args: /// * `netuid` - The network ID. - /// * `consensus` - A vector of consensus values. - /// * `bonds_delta` - A vector of bond deltas. + /// * `weights` - A vector of weights. /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// * `active_stake` - A vector of active stake values. /// /// # Returns: /// A vector of EMA bonds. - pub fn compute_ema_bonds( + pub fn compute_bonds_sparse( netuid: u16, - consensus: Vec, - bonds_delta: Vec>, - bonds: Vec>, - ) -> Vec> { + weights: &[Vec<(u16, I32F32)>], + bonds: &[Vec<(u16, I32F32)>], + consensus: &[I32F32], + ) -> Vec> { // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1212,46 +1102,181 @@ impl Pallet { .iter() .any(|&c| c != I32F32::saturating_from_num(0)) { - // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. - let consensus_high = quantile(&consensus, 0.75); - let consensus_low = quantile(&consensus, 0.25); + // Liquid Alpha is enabled, compute the liquid alphas matrix. + let alphas: Vec> = + Self::compute_liquid_alpha_values_sparse(netuid, weights, bonds, consensus); + log::trace!("alphas: {:?}", &alphas); - // Further check if the high and low consensus values meet the required conditions. - if (consensus_high > consensus_low) || consensus_high != 0 || consensus_low < 0 { - log::trace!("Using Liquid Alpha"); + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. 
+ mat_ema_alpha_sparse(weights, bonds, &alphas) + } else { + // Liquid Alpha is disabled, compute the liquid alpha value. + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); - // Get the high and low alpha values for the network. - let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - log::trace!("alpha_low: {:?} alpha_high: {:?}", alpha_low, alpha_high); + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + mat_ema_sparse(weights, bonds, alpha) + } + } - // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. - let (a, b) = Self::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); + /// Compute liquid alphas matrix + /// There is a separate alpha param for each validator-miner binding + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `weights` - A vector of weights. + /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. + /// + /// # Returns: + /// A matrix of alphas + pub fn compute_liquid_alpha_values( + netuid: u16, + weights: &[Vec], // current epoch weights + bonds: &[Vec], // previous epoch bonds + consensus: &[I32F32], // previous epoch consensus weights + ) -> Vec> { + assert!(weights.len() == bonds.len()); - // Compute the alpha values using the logistic function parameters. - let alpha = Self::compute_alpha_values(&consensus, a, b); + // Get the high and low alpha values for the network. + let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); + let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - // Clamp the alpha values between alpha_high and alpha_low. - let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); + let mut alphas = Vec::new(); - // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. 
- Self::compute_ema_bonds_with_liquid_alpha(&bonds_delta, &bonds, clamped_alpha) - } else { - log::trace!("Using Bonds Moving Average"); + for (w_row, b_row) in weights.iter().zip(bonds.iter()) { + let mut row_alphas = Vec::new(); - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + for ((weight, bond), consensus_val) in + w_row.iter().zip(b_row.iter()).zip(consensus.iter()) + { + let alpha = Self::alpha_sigmoid( + *consensus_val, + *weight, + *bond, + alpha_low, + alpha_high, + alpha_sigmoid_steepness, + ); + row_alphas.push(alpha); } - } else { - log::trace!("Using Bonds Moving Average"); + alphas.push(row_alphas); + } + alphas + } - // Compute the EMA of bonds using a normal alpha value. - Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + /// Compute liquid alphas sparse matrix + /// There is a separate alpha param for each validator-miner binding + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `weights` - A vector of weights. + /// * `bonds` - A vector of bonds. + /// * `consensus` - A vector of consensus values. 
+ /// + /// # Returns: + /// A dense matrix of alphas + pub fn compute_liquid_alpha_values_sparse( + netuid: u16, + weights: &[Vec<(u16, I32F32)>], // current epoch weights + bonds: &[Vec<(u16, I32F32)>], // previous epoch bonds + consensus: &[I32F32], // previous epoch consensus weights + ) -> Vec> { + assert!(weights.len() == bonds.len()); + + let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); + let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); + + let mut alphas = Vec::with_capacity(consensus.len()); + let zero = I32F32::from_num(0.0); + + // iterate over rows + for (w_row, b_row) in weights.iter().zip(bonds.iter()) { + let mut row_alphas = Vec::with_capacity(w_row.len()); + let mut w_iter = w_row.iter().peekable(); + let mut b_iter = b_row.iter().peekable(); + for (j_pos, consensus_val) in consensus.iter().enumerate() { + let j = j_pos as u16; + + let mut weight = zero; + while let Some(&&(i, val)) = w_iter.peek() { + if i < j { + w_iter.next(); + } else { + if i == j { + weight = val; + } + break; + } + } + + let mut bond = zero; + while let Some(&&(i, val)) = b_iter.peek() { + if i < j { + b_iter.next(); + } else { + if i == j { + bond = val; + } + break; + } + } + + let alpha = Self::alpha_sigmoid( + *consensus_val, + weight, + bond, + alpha_low, + alpha_high, + alpha_sigmoid_steepness, + ); + row_alphas.push(alpha); + } + alphas.push(row_alphas); } + alphas + } + + /// Helper function to compute the alpha value using a sigmoid function. 
+ pub fn alpha_sigmoid( + consensus: I32F32, + weight: I32F32, + bond: I32F32, + alpha_low: I32F32, + alpha_high: I32F32, + alpha_sigmoid_steepness: I32F32, + ) -> I32F32 { + let zero = I32F32::from_num(0.0); + let one = I32F32::from_num(1.0); + + let diff_buy = clamp_value(weight.saturating_sub(consensus), zero, one); + let diff_sell = clamp_value(bond.saturating_sub(weight), zero, one); + let combined_diff = if weight >= bond { diff_buy } else { diff_sell }; + + // sigmoid = 1. / (1. + e^(-steepness * (combined_diff - 0.5))) + let sigmoid = one.saturating_div( + one.saturating_add(safe_exp( + I32F32::from_num(-1).saturating_mul( + alpha_sigmoid_steepness + .saturating_mul(combined_diff.saturating_sub(I32F32::from_num(0.5))), + ), + )), + ); + let alpha = + alpha_low.saturating_add(sigmoid.saturating_mul(alpha_high.saturating_sub(alpha_low))); + + clamp_value(alpha, alpha_low, alpha_high) + } + + pub fn compute_disabled_liquid_alpha(netuid: u16) -> I32F32 { + // Retrieve the bonds moving average for the given network ID and scale it down. + let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) + .saturating_div(I64F64::from_num(1_000_000)); + + // Calculate the alpha value for the EMA calculation. + // Alpha is derived by subtracting the scaled bonds moving average from 1. 
+ let alpha: I32F32 = + I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average)); + alpha } pub fn do_set_alpha_values( @@ -1294,4 +1319,39 @@ impl Pallet { ); Ok(()) } + + pub fn do_reset_bonds(netuid: u16, account_id: &T::AccountId) -> Result<(), DispatchError> { + // check bonds reset enabled for this subnet + let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); + if !bonds_reset_enabled { + return Ok(()); + } + + if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { + for (i, bonds_vec) in + as IterableStorageDoubleMap>>::iter_prefix( + netuid, + ) + { + Bonds::::insert( + netuid, + i, + bonds_vec + .clone() + .iter() + .filter(|(j, _)| *j != uid) + .collect::>(), + ); + } + log::debug!("Reset bonds for {:?}, netuid {:?}", account_id, netuid); + } else { + log::warn!( + "Uid not found for {:?}, netuid {:?} - skipping bonds reset", + account_id, + netuid + ); + } + + Ok(()) + } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 2e0b479c0f..2a36458e4f 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -4,9 +4,9 @@ // Edit this file to define custom logic or remove it if it is not needed. // Learn more about FRAME and the core library of Substrate FRAME pallets: // -pub use pallet::*; use frame_system::{self as system, ensure_signed}; +pub use pallet::*; use frame_support::{ dispatch::{self, DispatchInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo}, @@ -641,6 +641,11 @@ pub mod pallet { T::InitialRho::get() } #[pallet::type_value] + /// Default value for alpha sigmoid steepness. + pub fn DefaultAlphaSigmoidSteepness() -> u16 { + T::InitialAlphaSigmoidSteepness::get() + } + #[pallet::type_value] /// Default value for kappa parameter. 
pub fn DefaultKappa() -> u16 { T::InitialKappa::get() @@ -695,8 +700,13 @@ pub mod pallet { pub fn DefaultBondsPenalty() -> u16 { T::InitialBondsPenalty::get() } + /// Default value for bonds reset - will not reset bonds #[pallet::type_value] + pub fn DefaultBondsResetOn() -> bool { + T::InitialBondsResetOn::get() + } /// Default validator prune length. + #[pallet::type_value] pub fn DefaultValidatorPruneLen() -> u64 { T::InitialValidatorPruneLen::get() } @@ -800,17 +810,27 @@ pub mod pallet { false } #[pallet::type_value] + /// -- ITEM (switches liquid alpha on) + pub fn DefaultYuma3() -> bool { + false + } + #[pallet::type_value] /// (alpha_low: 0.7, alpha_high: 0.9) pub fn DefaultAlphaValues() -> (u16, u16) { (45875, 58982) } - #[pallet::type_value] /// Default value for coldkey swap schedule duration pub fn DefaultColdkeySwapScheduleDuration() -> BlockNumberFor { T::InitialColdkeySwapScheduleDuration::get() } + #[pallet::type_value] + /// Default value for coldkey swap reschedule duration + pub fn DefaultColdkeySwapRescheduleDuration() -> BlockNumberFor { + T::InitialColdkeySwapRescheduleDuration::get() + } + #[pallet::type_value] /// Default value for applying pending items (e.g. childkeys). 
pub fn DefaultPendingCooldown() -> u64 { @@ -875,6 +895,14 @@ pub mod pallet { 360 } + #[pallet::type_value] + /// Default value for coldkey swap scheduled + pub fn DefaultColdkeySwapScheduled() -> (BlockNumberFor, T::AccountId) { + let default_account = T::AccountId::decode(&mut TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed"); + (BlockNumberFor::::from(0_u32), default_account) + } + #[pallet::type_value] /// Default value for setting subnet owner hotkey rate limit pub fn DefaultSetSNOwnerHotkeyRateLimit() -> u64 { @@ -889,6 +917,10 @@ pub mod pallet { pub type ColdkeySwapScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; + #[pallet::storage] + pub type ColdkeySwapRescheduleDuration = + StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapRescheduleDuration>; + #[pallet::storage] pub type DissolveNetworkScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultDissolveNetworkScheduleDuration>; @@ -1085,9 +1117,15 @@ pub mod pallet { pub type OwnedHotkeys = StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; - #[pallet::storage] // --- DMAP ( cold ) --> () | Maps coldkey to if a coldkey swap is scheduled. - pub type ColdkeySwapScheduled = - StorageMap<_, Blake2_128Concat, T::AccountId, (), ValueQuery>; + #[pallet::storage] // --- DMAP ( cold ) --> (block_expected, new_coldkey) | Maps coldkey to the block to swap at and new coldkey. + pub type ColdkeySwapScheduled = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + (BlockNumberFor, T::AccountId), + ValueQuery, + DefaultColdkeySwapScheduled, + >; #[pallet::storage] // --- DMAP ( hot, netuid ) --> alpha | Returns the total amount of alpha a hotkey owns. 
pub type TotalHotkeyAlpha = StorageDoubleMap< @@ -1293,6 +1331,10 @@ pub mod pallet { /// --- MAP ( netuid ) --> Rho pub type Rho = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRho>; #[pallet::storage] + /// --- MAP ( netuid ) --> AlphaSigmoidSteepness + pub type AlphaSigmoidSteepness = + StorageMap<_, Identity, u16, u16, ValueQuery, DefaultAlphaSigmoidSteepness>; + #[pallet::storage] /// --- MAP ( netuid ) --> Kappa pub type Kappa = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultKappa>; #[pallet::storage] @@ -1346,6 +1388,10 @@ pub mod pallet { /// --- MAP ( netuid ) --> bonds_penalty pub type BondsPenalty = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultBondsPenalty>; + #[pallet::storage] + /// --- MAP ( netuid ) --> bonds_reset + pub type BondsResetOn = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultBondsResetOn>; /// --- MAP ( netuid ) --> weights_set_rate_limit #[pallet::storage] pub type WeightsSetRateLimit = @@ -1422,6 +1468,9 @@ pub mod pallet { pub type LiquidAlphaOn = StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultLiquidAlpha>; #[pallet::storage] + /// --- MAP ( netuid ) --> Whether or not Yuma3 is enabled + pub type Yuma3On = StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultYuma3>; + #[pallet::storage] /// MAP ( netuid ) --> (alpha_low, alpha_high) pub type AlphaValues = StorageMap<_, Identity, u16, (u16, u16), ValueQuery, DefaultAlphaValues>; @@ -1779,6 +1828,7 @@ pub enum CustomTransactionError { ServingRateLimitExceeded, InvalidPort, BadRequest, + ZeroMaxAmount, } impl From for u8 { @@ -1799,6 +1849,7 @@ impl From for u8 { CustomTransactionError::ServingRateLimitExceeded => 12, CustomTransactionError::InvalidPort => 13, CustomTransactionError::BadRequest => 255, + CustomTransactionError::ZeroMaxAmount => 14, } } } @@ -2075,8 +2126,13 @@ where .into(); } - // Calcaulate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_add(*netuid, *limit_price); 
+ // Calculate the maximum amount that can be executed with price limit + let Ok(max_amount) = Pallet::::get_max_amount_add(*netuid, *limit_price) else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2116,8 +2172,14 @@ where limit_price, allow_partial, }) => { - // Calcaulate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_remove(*netuid, *limit_price); + // Calculate the maximum amount that can be executed with price limit + let Ok(max_amount) = Pallet::::get_max_amount_remove(*netuid, *limit_price) + else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2170,8 +2232,13 @@ where .into(); } - //Calculate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_add(*netuid, *limit_price); + // Calculate the maximum amount that can be executed with price limit + let Ok(max_amount) = Pallet::::get_max_amount_add(*netuid, *limit_price) else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2212,7 +2279,13 @@ where allow_partial, }) => { // Calculate the maximum amount that can be executed with price limit - let max_amount = Pallet::::get_max_amount_remove(*netuid, *limit_price); + let Ok(max_amount) = Pallet::::get_max_amount_remove(*netuid, *limit_price) + else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( @@ -2335,11 +2408,16 @@ where } // Get the max amount possible to exchange - let max_amount = Pallet::::get_max_amount_move( + let Ok(max_amount) = Pallet::::get_max_amount_move( *origin_netuid, 
*destination_netuid, *limit_price, - ); + ) else { + return InvalidTransaction::Custom( + CustomTransactionError::ZeroMaxAmount.into(), + ) + .into(); + }; // Fully validate the user input Self::result_to_validity( diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index cf4d97b65b..4377d9f016 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -96,12 +96,18 @@ mod config { /// Initial bonds penalty. #[pallet::constant] type InitialBondsPenalty: Get; + /// Initial bonds reset. + #[pallet::constant] + type InitialBondsResetOn: Get; /// Initial target registrations per interval. #[pallet::constant] type InitialTargetRegistrationsPerInterval: Get; /// Rho constant. #[pallet::constant] type InitialRho: Get; + /// AlphaSigmoidSteepness constant. + #[pallet::constant] + type InitialAlphaSigmoidSteepness: Get; /// Kappa constant. #[pallet::constant] type InitialKappa: Get; @@ -195,12 +201,17 @@ mod config { /// A flag to indicate if Liquid Alpha is enabled. #[pallet::constant] type LiquidAlphaOn: Get; + /// A flag to indicate if Yuma3 is enabled. + type Yuma3On: Get; // /// Initial hotkey emission tempo. // #[pallet::constant] // type InitialHotkeyEmissionTempo: Get; /// Coldkey swap schedule duartion. #[pallet::constant] type InitialColdkeySwapScheduleDuration: Get>; + /// Coldkey swap reschedule duration. + #[pallet::constant] + type InitialColdkeySwapRescheduleDuration: Get>; /// Dissolve network schedule duration #[pallet::constant] type InitialDissolveNetworkScheduleDuration: Get>; diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 98b83791e8..637042c456 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -152,9 +152,9 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. 
/// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(72_300_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -235,9 +235,9 @@ mod dispatches { /// - The revealed hash does not match any committed hash. /// #[pallet::call_index(97)] - #[pallet::weight((Weight::from_parts(103_000_000, 0) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -331,9 +331,9 @@ mod dispatches { /// * `InvalidInputLengths`: /// - The input vectors are of mismatched lengths. #[pallet::call_index(98)] - #[pallet::weight((Weight::from_parts(367_612_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(420_500_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -454,7 +454,7 @@ mod dispatches { /// - The hotkey we are delegating is not owned by the calling coldket. 
/// #[pallet::call_index(1)] - #[pallet::weight((Weight::from_parts(4_428_000, 0) + #[pallet::weight((Weight::from_parts(4_709_000, 0) .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::No))] pub fn become_delegate(_origin: OriginFor, _hotkey: T::AccountId) -> DispatchResult { @@ -889,7 +889,7 @@ mod dispatches { /// - The seal is incorrect. /// #[pallet::call_index(6)] - #[pallet::weight((Weight::from_parts(192_000_000, 0) + #[pallet::weight((Weight::from_parts(216_200_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::No))] pub fn register( @@ -964,10 +964,9 @@ mod dispatches { /// /// Weight is calculated based on the number of database reads and writes. #[pallet::call_index(71)] - #[pallet::weight((Weight::from_parts(127_713_000, 0) - .saturating_add(Weight::from_parts(111_100_000, 11645)) - .saturating_add(T::DbWeight::get().reads(18)) - .saturating_add(T::DbWeight::get().writes(12)), DispatchClass::Operational, Pays::No))] + #[pallet::weight((Weight::from_parts(179_500_000, 0) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(9)), DispatchClass::Operational, Pays::No))] pub fn swap_coldkey( origin: OriginFor, old_coldkey: T::AccountId, @@ -1327,18 +1326,23 @@ mod dispatches { /// - Consider adding checks to prevent scheduling too far into the future. 
/// TODO: Benchmark this call #[pallet::call_index(73)] - #[pallet::weight((Weight::from_parts(119_000_000, 0) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + #[pallet::weight((Weight::from_parts(44_520_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Operational, Pays::Yes))] pub fn schedule_swap_coldkey( origin: OriginFor, new_coldkey: T::AccountId, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - ensure!( - !ColdkeySwapScheduled::::contains_key(&who), - Error::::SwapAlreadyScheduled - ); + let current_block = >::block_number(); + + // If the coldkey has a scheduled swap, check if we can reschedule it + if ColdkeySwapScheduled::::contains_key(&who) { + let (scheduled_block, _scheduled_coldkey) = ColdkeySwapScheduled::::get(&who); + let reschedule_duration = ColdkeySwapRescheduleDuration::::get(); + let redo_when = scheduled_block.saturating_add(reschedule_duration); + ensure!(redo_when <= current_block, Error::::SwapAlreadyScheduled); + } // Calculate the swap cost and ensure sufficient balance let swap_cost = Self::get_key_swap_cost(); @@ -1369,7 +1373,7 @@ mod dispatches { ) .map_err(|_| Error::::FailedToSchedule)?; - ColdkeySwapScheduled::::insert(&who, ()); + ColdkeySwapScheduled::::insert(&who, (when, new_coldkey.clone())); // Emit the SwapScheduled event Self::deposit_event(Event::ColdkeySwapScheduled { old_coldkey: who.clone(), @@ -1715,9 +1719,9 @@ mod dispatches { /// May emit a `StakeSwapped` event on success. 
#[pallet::call_index(87)] #[pallet::weight(( - Weight::from_parts(190_100_000, 0) - .saturating_add(T::DbWeight::get().reads(13)) - .saturating_add(T::DbWeight::get().writes(9)), + Weight::from_parts(221_600_000, 0) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::No ))] @@ -1780,9 +1784,9 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(88)] - #[pallet::weight((Weight::from_parts(91_010_000, 0) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(6)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(159_200_000, 0) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn add_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1844,9 +1848,9 @@ mod dispatches { /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. /// #[pallet::call_index(89)] - #[pallet::weight((Weight::from_parts(172_100_000, 0) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(9)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(192_600_000, 0) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(10)), DispatchClass::Normal, Pays::No))] pub fn remove_stake_limit( origin: OriginFor, hotkey: T::AccountId, @@ -1888,9 +1892,9 @@ mod dispatches { /// May emit a `StakeSwapped` event on success. 
#[pallet::call_index(90)] #[pallet::weight(( - Weight::from_parts(162_400_000, 0) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(9)), + Weight::from_parts(232_000_000, 0) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::No ))] diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 089c741c33..2a8e5bc346 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -4,6 +4,7 @@ use frame_support::pallet_macros::pallet_section; /// This can later be imported into the pallet using [`import_section`]. #[pallet_section] mod errors { + #[derive(PartialEq)] #[pallet::error] pub enum Error { /// The subnet does not exist. @@ -209,5 +210,9 @@ mod errors { InvalidRecoveredPublicKey, /// SubToken disabled now SubtokenDisabled, + /// Zero max stake amount + ZeroMaxStakeAmount, + /// Invalid netuid duplication + SameNetuid, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index ccbfed9eff..9849a517ee 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -67,6 +67,8 @@ mod events { ActivityCutoffSet(u16, u16), /// Rho value is set. RhoSet(u16, u16), + /// steepness of the sigmoid used to compute alpha values. + AlphaSigmoidSteepnessSet(u16, u16), /// Kappa is set for a subnet. KappaSet(u16, u16), /// minimum allowed weight is set for a subnet. @@ -83,6 +85,8 @@ mod events { BondsMovingAverageSet(u16, u64), /// bonds penalty is set for a subnet. BondsPenaltySet(u16, u16), + /// bonds reset is set for a subnet. + BondsResetOnSet(u16, bool), /// setting the max number of allowed validators on a subnet. MaxAllowedValidatorsSet(u16, u16), /// the axon server information is added to the network. 
diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 4d26994f05..78de392218 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -100,7 +100,10 @@ mod hooks { // Set subtoken enabled for all existed subnets .saturating_add(migrations::migrate_set_subtoken_enabled::migrate_set_subtoken_enabled::()) // Remove all entries in TotalHotkeyColdkeyStakesThisInterval - .saturating_add(migrations::migrate_remove_total_hotkey_coldkey_stakes_this_interval::migrate_remove_total_hotkey_coldkey_stakes_this_interval::()); + .saturating_add(migrations::migrate_remove_total_hotkey_coldkey_stakes_this_interval::migrate_remove_total_hotkey_coldkey_stakes_this_interval::()) + // Wipe the deprecated RateLimit storage item in the commitments pallet + .saturating_add(migrations::migrate_remove_commitments_rate_limit::migrate_remove_commitments_rate_limit::()); + weight // Remove all entries in orphaned storage items .saturating_add( @@ -110,7 +113,9 @@ mod hooks { // Reset bonds moving average .saturating_add(migrations::migrate_reset_bonds_moving_average::migrate_reset_bonds_moving_average::()) // Reset max burn - .saturating_add(migrations::migrate_reset_max_burn::migrate_reset_max_burn::()); + .saturating_add(migrations::migrate_reset_max_burn::migrate_reset_max_burn::()) + // Migrate ColdkeySwapScheduled structure to new format + .saturating_add(migrations::migrate_coldkey_swap_scheduled::migrate_coldkey_swap_scheduled::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs b/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs new file mode 100644 index 0000000000..e15f468ddc --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_coldkey_swap_scheduled.rs @@ -0,0 +1,78 @@ +use super::*; +use crate::AccountIdOf; +use frame_support::{ + pallet_prelude::{Blake2_128Concat, ValueQuery}, + storage_alias, + traits::Get, + weights::Weight, +}; 
+pub use frame_system::pallet_prelude::BlockNumberFor; +use scale_info::prelude::string::String; +/// Module containing deprecated storage format for LoadedEmission +pub mod deprecated_coldkey_swap_scheduled_format { + use super::*; + + #[storage_alias] + pub(super) type ColdkeySwapScheduled = + StorageMap, Blake2_128Concat, AccountIdOf, (), ValueQuery>; +} + +/// Migrate the ColdkeySwapScheduled map to the new storage format +pub fn migrate_coldkey_swap_scheduled() -> Weight { + use deprecated_coldkey_swap_scheduled_format as old; + + let migration_name = b"migrate_coldkey_swap_scheduled".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Migrate ColdkeySwapScheduled map + // ------------------------------ + + let curr_keys: Vec> = old::ColdkeySwapScheduled::::iter_keys().collect(); + + // Remove any undecodable entries + for coldkey in &curr_keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if old::ColdkeySwapScheduled::::try_get(coldkey).is_err() { + old::ColdkeySwapScheduled::::remove(coldkey); + log::warn!( + "Was unable to decode old coldkey_swap_scheduled for coldkey {:?}", + &coldkey + ); + } + } + + let default_value = DefaultColdkeySwapScheduled::::get(); + ColdkeySwapScheduled::::translate::<(), _>(|_coldkey: AccountIdOf, _: ()| { + Some((default_value.0, default_value.1.clone())) + }); + // write once for each item in the map, no matter remove or translate + weight.saturating_accrue(T::DbWeight::get().writes(curr_keys.len() as u64)); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = 
weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs b/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs new file mode 100644 index 0000000000..b32d4edc9f --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_commitments_rate_limit.rs @@ -0,0 +1,54 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; +use sp_io::{KillStorageResult, hashing::twox_128, storage::clear_prefix}; + +pub fn migrate_remove_commitments_rate_limit() -> Weight { + let migration_name = b"migrate_remove_commitments_rate_limit".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------------------------------------- + // Step 1: Remove all entries under the `RateLimit` storage key + // ------------------------------------------------------------- + let mut rate_limit_prefix = Vec::new(); + rate_limit_prefix.extend_from_slice(&twox_128("Commitments".as_bytes())); + rate_limit_prefix.extend_from_slice(&twox_128("RateLimit".as_bytes())); + + let removal_result = clear_prefix(&rate_limit_prefix, Some(u32::MAX)); + let removed_entries = match removal_result { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::warn!("Failed to remove some `RateLimit` entries."); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries)); + log::info!("Removed {} entries from `RateLimit`.", 
removed_entries); + + // ------------------------------------------------------------- + // Step 2: Mark this migration as completed + // ------------------------------------------------------------- + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs b/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs index 2e67e456b7..5bb442af18 100644 --- a/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs +++ b/pallets/subtensor/src/migrations/migrate_reset_bonds_moving_average.rs @@ -24,14 +24,14 @@ pub fn migrate_reset_bonds_moving_average() -> Weight { ); // ------------------------------ - // Step 1: Reset all subnet's BondsMovingAverage to 975000 + // Step 1: Reset all subnet's BondsMovingAverage to 975000 if the value exceeds 975000 // ------------------------------ let mut reset_entries_count = 0u64; for netuid in BondsMovingAverage::::iter_keys() { BondsMovingAverage::::mutate(netuid, |average| { - *average = 975000; + *average = (*average).min(975000); }); reset_entries_count = reset_entries_count.saturating_add(1); } diff --git a/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs b/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs index 5cc5f2987b..a2662f7de3 100644 --- a/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs +++ b/pallets/subtensor/src/migrations/migrate_reset_max_burn.rs @@ -39,10 +39,7 @@ pub fn migrate_reset_max_burn() -> Weight { weight = weight .saturating_add(T::DbWeight::get().reads_writes(reset_entries_count, reset_entries_count)); - log::info!( - "Reset {} subnets from BondsMovingAverage.", - reset_entries_count - ); + log::info!("Reset {} subnets from MaxBurn.", reset_entries_count); // 
------------------------------ // Step 2: Mark Migration as Completed diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 824d8d4706..5c6347034f 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -4,6 +4,7 @@ use sp_io::KillStorageResult; use sp_io::hashing::twox_128; use sp_io::storage::clear_prefix; pub mod migrate_chain_identity; +pub mod migrate_coldkey_swap_scheduled; pub mod migrate_commit_reveal_v2; pub mod migrate_create_root_network; pub mod migrate_delete_subnet_21; @@ -14,6 +15,7 @@ pub mod migrate_init_total_issuance; pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; +pub mod migrate_remove_commitments_rate_limit; pub mod migrate_remove_stake_map; pub mod migrate_remove_total_hotkey_coldkey_stakes_this_interval; pub mod migrate_remove_unused_maps_and_values; diff --git a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs index ad8c356d50..8886806cd9 100644 --- a/pallets/subtensor/src/staking/add_stake.rs +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -288,7 +288,7 @@ impl Pallet { ); // 2. Calculate the maximum amount that can be executed with price limit - let max_amount = Self::get_max_amount_add(netuid, limit_price); + let max_amount = Self::get_max_amount_add(netuid, limit_price)?; let mut possible_stake = stake_to_be_added; if possible_stake > max_amount { possible_stake = max_amount; @@ -329,29 +329,29 @@ impl Pallet { } // Returns the maximum amount of RAO that can be executed with price limit - pub fn get_max_amount_add(netuid: u16, limit_price: u64) -> u64 { + pub fn get_max_amount_add(netuid: u16, limit_price: u64) -> Result> { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or // higher, then max_amount equals u64::MAX, otherwise it is 0. 
if (netuid == Self::get_root_netuid()) || (SubnetMechanism::::get(netuid)) == 0 { if limit_price >= 1_000_000_000 { - return u64::MAX; + return Ok(u64::MAX); } else { - return 0; + return Err(Error::ZeroMaxStakeAmount); } } // Corner case: SubnetAlphaIn is zero. Staking can't happen, so max amount is zero. let alpha_in = SubnetAlphaIn::::get(netuid); if alpha_in == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_u128 = alpha_in as u128; // Corner case: SubnetTAO is zero. Staking can't happen, so max amount is zero. let tao_reserve = SubnetTAO::::get(netuid); if tao_reserve == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let tao_reserve_u128 = tao_reserve as u128; @@ -364,7 +364,7 @@ impl Pallet { .saturating_mul(tao)) || (limit_price == 0u64) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Main case: return limit_price * SubnetAlphaIn - SubnetTAO @@ -375,10 +375,15 @@ impl Pallet { .checked_div(tao) .unwrap_or(0) .saturating_sub(tao_reserve_u128); + + if result == 0 { + return Err(Error::ZeroMaxStakeAmount); + } + if result < u64::MAX as u128 { - result as u64 + Ok(result as u64) } else { - u64::MAX + Ok(u64::MAX) } } } diff --git a/pallets/subtensor/src/staking/move_stake.rs b/pallets/subtensor/src/staking/move_stake.rs index 4198d29efc..ef576f3607 100644 --- a/pallets/subtensor/src/staking/move_stake.rs +++ b/pallets/subtensor/src/staking/move_stake.rs @@ -311,7 +311,7 @@ impl Pallet { ) -> Result> { // Calculate the maximum amount that can be executed let max_amount = if let Some(limit_price) = maybe_limit_price { - Self::get_max_amount_move(origin_netuid, destination_netuid, limit_price) + Self::get_max_amount_move(origin_netuid, destination_netuid, limit_price)? 
} else { alpha_amount }; @@ -401,7 +401,7 @@ impl Pallet { origin_netuid: u16, destination_netuid: u16, limit_price: u64, - ) -> u64 { + ) -> Result> { let tao: U64F64 = U64F64::saturating_from_num(1_000_000_000); // Corner case: both subnet IDs are root or stao @@ -413,9 +413,9 @@ impl Pallet { || (SubnetMechanism::::get(destination_netuid)) == 0) { if limit_price > tao.saturating_to_num::() { - return 0; + return Err(Error::ZeroMaxStakeAmount); } else { - return u64::MAX; + return Ok(u64::MAX); } } @@ -426,7 +426,7 @@ impl Pallet { && ((SubnetMechanism::::get(destination_netuid)) == 1) { if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } else { // The destination price is reverted because the limit_price is origin_price / destination_price let destination_subnet_price = tao @@ -450,7 +450,7 @@ impl Pallet { let subnet_tao_1 = SubnetTAO::::get(origin_netuid); let subnet_tao_2 = SubnetTAO::::get(destination_netuid); if (subnet_tao_1 == 0) || (subnet_tao_2 == 0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let subnet_tao_1_float: U64F64 = U64F64::saturating_from_num(subnet_tao_1); let subnet_tao_2_float: U64F64 = U64F64::saturating_from_num(subnet_tao_2); @@ -459,7 +459,7 @@ impl Pallet { let alpha_in_1 = SubnetAlphaIn::::get(origin_netuid); let alpha_in_2 = SubnetAlphaIn::::get(destination_netuid); if (alpha_in_1 == 0) || (alpha_in_2 == 0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_1_float: U64F64 = U64F64::saturating_from_num(alpha_in_1); let alpha_in_2_float: U64F64 = U64F64::saturating_from_num(alpha_in_2); @@ -474,12 +474,12 @@ impl Pallet { let current_price = Self::get_alpha_price(origin_netuid) .safe_div(Self::get_alpha_price(destination_netuid)); if limit_price_float > current_price { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Corner case: limit_price is zero if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } // Main case @@ -491,10 +491,16 @@ impl Pallet { let t1_over_sum: 
U64F64 = subnet_tao_1_float.safe_div(tao_sum); let t2_over_sum: U64F64 = subnet_tao_2_float.safe_div(tao_sum); - alpha_in_2_float + let final_result = alpha_in_2_float .saturating_mul(t1_over_sum) .safe_div(limit_price_float) .saturating_sub(alpha_in_1_float.saturating_mul(t2_over_sum)) - .saturating_to_num::() + .saturating_to_num::(); + + if final_result != 0 { + Ok(final_result) + } else { + Err(Error::ZeroMaxStakeAmount) + } } } diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 3930a923a8..372b4f6e61 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -484,8 +484,8 @@ impl Pallet { alpha_unstaked ); - // 2. Calcaulate the maximum amount that can be executed with price limit - let max_amount = Self::get_max_amount_remove(netuid, limit_price); + // 2. Calculate the maximum amount that can be executed with price limit + let max_amount = Self::get_max_amount_remove(netuid, limit_price)?; let mut possible_alpha = alpha_unstaked; if possible_alpha > max_amount { possible_alpha = max_amount; @@ -614,36 +614,36 @@ impl Pallet { } // Returns the maximum amount of RAO that can be executed with price limit - pub fn get_max_amount_remove(netuid: u16, limit_price: u64) -> u64 { + pub fn get_max_amount_remove(netuid: u16, limit_price: u64) -> Result> { // Corner case: root and stao // There's no slippage for root or stable subnets, so if limit price is 1e9 rao or // higher, then max_amount equals u64::MAX, otherwise it is 0. if (netuid == Self::get_root_netuid()) || (SubnetMechanism::::get(netuid)) == 0 { if limit_price <= 1_000_000_000 { - return u64::MAX; + return Ok(u64::MAX); } else { - return 0; + return Err(Error::ZeroMaxStakeAmount); } } // Corner case: SubnetAlphaIn is zero. Staking can't happen, so max amount is zero. 
let alpha_in = SubnetAlphaIn::::get(netuid); if alpha_in == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let alpha_in_u128 = alpha_in as u128; // Corner case: SubnetTAO is zero. Staking can't happen, so max amount is zero. let tao_reserve = SubnetTAO::::get(netuid); if tao_reserve == 0 { - return 0; + return Err(Error::ZeroMaxStakeAmount); } let tao_reserve_u128 = tao_reserve as u128; // Corner case: limit_price == 0 (because there's division by limit price) // => can sell all if limit_price == 0 { - return u64::MAX; + return Ok(u64::MAX); } // Corner case: limit_price >= current_price (price cannot increase with unstaking) @@ -657,7 +657,7 @@ impl Pallet { .checked_div(alpha_in_u128) .unwrap_or(0) { - return 0; + return Err(Error::ZeroMaxStakeAmount); } // Main case: SubnetTAO / limit_price - SubnetAlphaIn @@ -670,9 +670,13 @@ impl Pallet { .saturating_sub(alpha_in_u128); if result < u64::MAX as u128 { - result as u64 + if result == 0 { + return Err(Error::ZeroMaxStakeAmount); + } + + Ok(result as u64) } else { - u64::MAX + Ok(u64::MAX) } } } diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index a928c53e31..65280c8a43 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -571,6 +571,14 @@ impl Pallet { netuid: u16, amount: u64, ) -> u64 { + if amount > 0 { + let mut staking_hotkeys = StakingHotkeys::::get(coldkey); + if !staking_hotkeys.contains(hotkey) { + staking_hotkeys.push(hotkey.clone()); + StakingHotkeys::::insert(coldkey, staking_hotkeys.clone()); + } + } + let mut alpha_share_pool = Self::get_alpha_share_pool(hotkey.clone(), netuid); // We expect to add a positive amount here. 
let actual_alpha = alpha_share_pool.update_value_for_one(coldkey, amount as i64); @@ -848,16 +856,9 @@ impl Pallet { actual_alpha = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( hotkey, coldkey, netuid, alpha, ); - - // Step 4: Update the list of hotkeys staking for this coldkey - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - if !staking_hotkeys.contains(hotkey) { - staking_hotkeys.push(hotkey.clone()); - StakingHotkeys::::insert(coldkey, staking_hotkeys.clone()); - } } - // Step 5. Increase Tao reserves by the fee amount. + // Step 4. Increase Tao reserves by the fee amount. SubnetTAO::::mutate(netuid, |total| { *total = total.saturating_add(actual_fee); }); @@ -866,7 +867,7 @@ impl Pallet { }); LastColdkeyHotkeyStakeBlock::::insert(coldkey, hotkey, Self::get_current_block_as_u64()); - // Step 6. Deposit and log the staking event. + // Step 5. Deposit and log the staking event. Self::deposit_event(Event::StakeAdded( coldkey.clone(), hotkey.clone(), @@ -885,7 +886,7 @@ impl Pallet { actual_fee ); - // Step 7: Return the amount of alpha staked + // Step 6: Return the amount of alpha staked actual_alpha } @@ -999,7 +1000,7 @@ impl Pallet { /// pub fn validate_stake_transition( origin_coldkey: &T::AccountId, - _destination_coldkey: &T::AccountId, + destination_coldkey: &T::AccountId, origin_hotkey: &T::AccountId, destination_hotkey: &T::AccountId, origin_netuid: u16, @@ -1009,6 +1010,11 @@ impl Pallet { maybe_allow_partial: Option, check_transfer_toggle: bool, ) -> Result<(), Error> { + // Ensure stake transition is actually happening + if origin_coldkey == destination_coldkey && origin_hotkey == destination_hotkey { + ensure!(origin_netuid != destination_netuid, Error::::SameNetuid); + } + // Ensure that both subnets exist. 
ensure!( Self::if_subnet_exist(origin_netuid), diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index c954bd3665..1aae9c3a0c 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -3,518 +3,458 @@ use super::*; /// Returns the Unicode symbol as a Vec for a given netuid. impl Pallet { pub fn get_name_for_subnet(netuid: u16) -> Vec { - match netuid { - 0 => b"root".to_vec(), // Τ (Upper case Tau) - 1 => b"apex".to_vec(), // α (Alpha) - 2 => b"omron".to_vec(), // β (Beta) - 3 => b"templar".to_vec(), // γ (Gamma) - 4 => b"targon".to_vec(), // δ (Delta) - 5 => b"kaito".to_vec(), // ε (Epsilon) - 6 => b"infinite".to_vec(), // ζ (Zeta) - 7 => b"subvortex".to_vec(), // η (Eta) - 8 => b"ptn".to_vec(), // θ (Theta) - 9 => b"pretrain".to_vec(), // ι (Iota) - 10 => b"sturdy".to_vec(), // κ (Kappa) - 11 => b"dippy".to_vec(), // λ (Lambda) - 12 => b"horde".to_vec(), // μ (Mu) - 13 => b"dataverse".to_vec(), // ν (Nu) - 14 => b"palaidn".to_vec(), // ξ (Xi) - 15 => b"deval".to_vec(), // ο (Omicron) - 16 => b"bitads".to_vec(), // π (Pi) - 17 => b"3gen".to_vec(), // ρ (Rho) - 18 => b"cortex".to_vec(), // σ (Sigma) - 19 => b"inference".to_vec(), // t (Tau) - 20 => b"bitagent".to_vec(), // υ (Upsilon) - 21 => b"any-any".to_vec(), // φ (Phi) - 22 => b"meta".to_vec(), // χ (Chi) - 23 => b"social".to_vec(), // ψ (Psi) - 24 => b"omega".to_vec(), // ω (Omega) - 25 => b"protein".to_vec(), // א (Aleph) - 26 => b"alchemy".to_vec(), // ב (Bet) - 27 => b"compute".to_vec(), // ג (Gimel) - 28 => b"oracle".to_vec(), // ד (Dalet) - 29 => b"coldint".to_vec(), // ה (He) - 30 => b"bet".to_vec(), // ו (Vav) - 31 => b"naschain".to_vec(), // ז (Zayin) - 32 => b"itsai".to_vec(), // ח (Het) - 33 => b"ready".to_vec(), // ט (Tet) - 34 => b"mind".to_vec(), // י (Yod) - 35 => b"logic".to_vec(), // ך (Final Kaf) - 36 => b"automata".to_vec(), // כ (Kaf) - 37 => b"tuning".to_vec(), // ל (Lamed) - 38 => 
b"distributed".to_vec(), // ם (Final Mem) - 39 => b"edge".to_vec(), // מ (Mem) - 40 => b"chunk".to_vec(), // ן (Final Nun) - 41 => b"sportsensor".to_vec(), // נ (Nun) - 42 => b"masa".to_vec(), // ס (Samekh) - 43 => b"graphite".to_vec(), // ע (Ayin) - 44 => b"score".to_vec(), // ף (Final Pe) - 45 => b"gen42".to_vec(), // פ (Pe) - 46 => b"neural".to_vec(), // ץ (Final Tsadi) - 47 => b"condense".to_vec(), // צ (Tsadi) - 48 => b"nextplace".to_vec(), // ק (Qof) - 49 => b"automl".to_vec(), // ר (Resh) - 50 => b"audio".to_vec(), // ש (Shin) - 51 => b"celium".to_vec(), // ת (Tav) - 52 => b"dojo".to_vec(), // ا (Alif) - 53 => b"frontier".to_vec(), // ب (Ba) - 54 => b"safescan".to_vec(), // ت (Ta) - 55 => b"unknown".to_vec(), // ث (Tha) - 56 => b"gradients".to_vec(), // ج (Jim) - 57 => b"gaia".to_vec(), // ح (Ha) - 58 => b"dippy-speach".to_vec(), // خ (Kha) - 59 => b"agent-arena".to_vec(), // د (Dal) - 60 => b"unknown".to_vec(), // ذ (Dhal) - 61 => b"red team".to_vec(), // ر (Ra) - 62 => b"agentao".to_vec(), // ز (Zay) - 63 => b"lean-in".to_vec(), // س (Sin) - 64 => b"chutes".to_vec(), // ش (Shin) - 65 => b"sad".to_vec(), - 66 => b"dad".to_vec(), - 67 => b"ta".to_vec(), - 68 => b"dha".to_vec(), - 69 => b"ain".to_vec(), - 70 => b"ghayn".to_vec(), - 71 => b"fa".to_vec(), - 72 => b"qaf".to_vec(), - 73 => b"kaf".to_vec(), - 74 => b"lam".to_vec(), - 75 => b"mim".to_vec(), - 76 => b"nun".to_vec(), - 77 => b"ha".to_vec(), - 78 => b"waw".to_vec(), - 79 => b"ya".to_vec(), - 80 => b"alef".to_vec(), - 81 => b"fehu".to_vec(), - 82 => b"uruz".to_vec(), - 83 => b"thurisaz".to_vec(), - 84 => b"ansuz".to_vec(), - 85 => b"raidho".to_vec(), - 86 => b"kaunan".to_vec(), - 87 => b"cyr_yeru".to_vec(), - 88 => b"algiz".to_vec(), - 89 => b"berkanan".to_vec(), - 90 => b"ogham".to_vec(), - 91 => b"beith".to_vec(), - 92 => b"luis".to_vec(), - 93 => b"fearn".to_vec(), - 94 => b"sail".to_vec(), - 95 => b"nion".to_vec(), - 96 => b"forfeda".to_vec(), - 97 => b"ani".to_vec(), - 98 => b"bani".to_vec(), - 99 
=> b"gani".to_vec(), - 100 => b"doni".to_vec(), - 101 => b"eni".to_vec(), - 102 => b"vini".to_vec(), - 103 => b"ayp".to_vec(), - 104 => b"ben".to_vec(), - 105 => b"gim".to_vec(), - 106 => b"da".to_vec(), - 107 => b"ech".to_vec(), - 108 => b"za".to_vec(), - 109 => b"armeni".to_vec(), - 110 => b"grave".to_vec(), - 111 => b"io".to_vec(), - 112 => b"dje".to_vec(), - 113 => b"gje".to_vec(), - 114 => b"ie".to_vec(), - 115 => b"dze".to_vec(), - 116 => b"hard_sign".to_vec(), - 117 => b"alfa".to_vec(), - 118 => b"alfas".to_vec(), - 119 => b"vida".to_vec(), // Ⲃ (Vida, 119) - 120 => b"vida_small".to_vec(), // ⲃ (Small Vida, 120) - 121 => b"gamma".to_vec(), // Ⲅ (Gamma, 121) - 122 => b"gamma_small".to_vec(), // ⲅ (Small Gamma, 122) - 123 => b"brahmi_a".to_vec(), // 𑀀 (A, 123) - 124 => b"brahmi_aa".to_vec(), // 𑀁 (Aa, 124) - 125 => b"brahmi_i".to_vec(), // 𑀂 (I, 125) - 126 => b"brahmi_ii".to_vec(), // 𑀃 (Ii, 126) - 127 => b"brahmi_u".to_vec(), // 𑀅 (U, 127) - 128 => b"la".to_vec(), - 129 => b"va".to_vec(), - 130 => b"sha".to_vec(), - 131 => b"ssa".to_vec(), - 132 => b"sa".to_vec(), - 133 => b"ha".to_vec(), - 134 => b"glagolitic_az".to_vec(), // Ⰰ (Az, 134) - 135 => b"glagolitic_buky".to_vec(), // Ⰱ (Buky, 135) - 136 => b"glagolitic_vede".to_vec(), // Ⰲ (Vede, 136) - 137 => b"glagolitic_glagoli".to_vec(), // Ⰳ (Glagoli, 137) - 138 => b"glagolitic_dobro".to_vec(), // Ⰴ (Dobro, 138) - 139 => b"glagolitic_yest".to_vec(), // Ⰵ (Yest, 139) - 140 => b"glagolitic_zhivete".to_vec(), // Ⰶ (Zhivete, 140) - 141 => b"glagolitic_zemlja".to_vec(), // Ⰷ (Zemlja, 141) - 142 => b"glagolitic_izhe".to_vec(), // Ⰸ (Izhe, 142) - 143 => b"glagolitic_initial_izhe".to_vec(), // Ⰹ (Initial Izhe, 143) - 144 => b"glagolitic_i".to_vec(), // Ⰺ (I, 144) - 145 => b"glagolitic_djerv".to_vec(), // Ⰻ (Djerv, 145) - 146 => b"glagolitic_kako".to_vec(), // Ⰼ (Kako, 146) - 147 => b"glagolitic_ljudije".to_vec(), // Ⰽ (Ljudije, 147) - 148 => b"glagolitic_myse".to_vec(), // Ⰾ (Myse, 148) - 149 => 
b"glagolitic_nash".to_vec(), // Ⰿ (Nash, 149) - 150 => b"glagolitic_on".to_vec(), // Ⱀ (On, 150) - 151 => b"glagolitic_pokoj".to_vec(), // Ⱁ (Pokoj, 151) - 152 => b"glagolitic_rtsy".to_vec(), // Ⱂ (Rtsy, 152) - 153 => b"glagolitic_slovo".to_vec(), // Ⱃ (Slovo, 153) - 154 => b"glagolitic_tvrido".to_vec(), // Ⱄ (Tvrido, 154) - 155 => b"glagolitic_uku".to_vec(), // Ⱅ (Uku, 155) - 156 => b"glagolitic_fert".to_vec(), // Ⱆ (Fert, 156) - 157 => b"glagolitic_xrivi".to_vec(), // Ⱇ (Xrivi, 157) - 158 => b"glagolitic_ot".to_vec(), // Ⱈ (Ot, 158) - 159 => b"glagolitic_cy".to_vec(), // Ⱉ (Cy, 159) - 160 => b"glagolitic_shcha".to_vec(), // Ⱊ (Shcha, 160) - 161 => b"glagolitic_er".to_vec(), // Ⱋ (Er, 161) - 162 => b"glagolitic_yeru".to_vec(), // Ⱌ (Yeru, 162) - 163 => b"glagolitic_small_yer".to_vec(), // Ⱍ (Small Yer, 163) - 164 => b"glagolitic_yo".to_vec(), // Ⱎ (Yo, 164) - 165 => b"glagolitic_yu".to_vec(), // Ⱏ (Yu, 165) - 166 => b"glagolitic_ja".to_vec(), // Ⱐ (Ja, 166) - 167 => b"thai_ko_kai".to_vec(), // ก (Ko Kai, 167) - 168 => b"thai_kho_khai".to_vec(), // ข (Kho Khai, 168) - 169 => b"thai_kho_khuat".to_vec(), // ฃ (Kho Khuat, 169) - 170 => b"thai_kho_khon".to_vec(), // ค (Kho Khon, 170) - 171 => b"thai_kho_rakhang".to_vec(), // ฅ (Kho Rakhang, 171) - 172 => b"thai_kho_khwai".to_vec(), // ฆ (Kho Khwai, 172) - 173 => b"thai_ngo_ngu".to_vec(), // ง (Ngo Ngu, 173) - 174 => b"thai_cho_chan".to_vec(), // จ (Cho Chan, 174) - 175 => b"thai_cho_ching".to_vec(), // ฉ (Cho Ching, 175) - 176 => b"thai_cho_chang".to_vec(), // ช (Cho Chang, 176) - 177 => b"thai_so_so".to_vec(), // ซ (So So, 177) - 178 => b"thai_cho_choe".to_vec(), // ฌ (Cho Choe, 178) - 179 => b"thai_yo_ying".to_vec(), // ญ (Yo Ying, 179) - 180 => b"thai_do_chada".to_vec(), // ฎ (Do Chada, 180) - 181 => b"thai_to_patak".to_vec(), // ฏ (To Patak, 181) - 182 => b"thai_tho_than".to_vec(), // ฐ (Tho Than, 182) - 183 => b"thai_tho_nangmontho".to_vec(), // ฑ (Tho Nangmontho, 183) - 184 => b"thai_tho_phuthao".to_vec(), // ฒ 
(Tho Phuthao, 184) - 185 => b"thai_no_nen".to_vec(), // ณ (No Nen, 185) - 186 => b"thai_do_dek".to_vec(), // ด (Do Dek, 186) - 187 => b"thai_to_tao".to_vec(), // ต (To Tao, 187) - 188 => b"thai_tho_thung".to_vec(), // ถ (Tho Thung, 188) - 189 => b"thai_tho_thahan".to_vec(), // ท (Tho Thahan, 189) - 190 => b"thai_tho_thong".to_vec(), // ธ (Tho Thong, 190) - 191 => b"thai_no_nu".to_vec(), // น (No Nu, 191) - 192 => b"thai_bo_baimai".to_vec(), // บ (Bo Baimai, 192) - 193 => b"thai_po_pla".to_vec(), // ป (Po Pla, 193) - 194 => b"thai_pho_phung".to_vec(), // ผ (Pho Phung, 194) - 195 => b"thai_fo_fa".to_vec(), // ฝ (Fo Fa, 195) - 196 => b"thai_pho_phan".to_vec(), // พ (Pho Phan, 196) - 197 => b"thai_fo_fan".to_vec(), // ฟ (Fo Fan, 197) - 198 => b"thai_pho_samphao".to_vec(), // ภ (Pho Samphao, 198) - 199 => b"thai_mo_ma".to_vec(), // ม (Mo Ma, 199) - 200 => b"thai_yo_yak".to_vec(), // ย (Yo Yak, 200) - 201 => b"thai_ro_rua".to_vec(), // ร (Ro Rua, 201) - 202 => b"thai_lo_ling".to_vec(), // ล (Lo Ling, 202) - 203 => b"thai_wo_waen".to_vec(), // ว (Wo Waen, 203) - 204 => b"thai_so_sala".to_vec(), // ศ (So Sala, 204) - 205 => b"thai_so_rusi".to_vec(), // ษ (So Rusi, 205) - 206 => b"thai_so_sua".to_vec(), // ส (So Sua, 206) - 207 => b"thai_ho_hip".to_vec(), // ห (Ho Hip, 207) - 208 => b"thai_lo_chula".to_vec(), // ฬ (Lo Chula, 208) - 209 => b"thai_o_ang".to_vec(), // อ (O Ang, 209) - 210 => b"thai_ho_nokhuk".to_vec(), // ฮ (Ho Nokhuk, 210) - 211 => b"hangul_giyeok".to_vec(), // ㄱ (Giyeok, 211) - 212 => b"hangul_nieun".to_vec(), // ㄴ (Nieun, 212) - 213 => b"hangul_digeut".to_vec(), // ㄷ (Digeut, 213) - 214 => b"hangul_rieul".to_vec(), // ㄹ (Rieul, 214) - 215 => b"hangul_mieum".to_vec(), // ㅁ (Mieum, 215) - 216 => b"hangul_bieup".to_vec(), // ㅂ (Bieup, 216) - 217 => b"hangul_siot".to_vec(), // ㅅ (Siot, 217) - 218 => b"hangul_ieung".to_vec(), // ㅇ (Ieung, 218) - 219 => b"hangul_jieut".to_vec(), // ㅈ (Jieut, 219) - 220 => b"hangul_chieut".to_vec(), // ㅊ (Chieut, 220) - 221 => 
b"hangul_kieuk".to_vec(), // ㅋ (Kieuk, 221) - 222 => b"hangul_tieut".to_vec(), // ㅌ (Tieut, 222) - 223 => b"hangul_pieup".to_vec(), // ㅍ (Pieup, 223) - 224 => b"hangul_hieut".to_vec(), // ㅎ (Hieut, 224) - 225 => b"hangul_a".to_vec(), // ㅏ (A, 225) - 226 => b"hangul_ae".to_vec(), // ㅐ (Ae, 226) - 227 => b"hangul_ya".to_vec(), // ㅑ (Ya, 227) - 228 => b"hangul_yae".to_vec(), // ㅒ (Yae, 228) - 229 => b"hangul_eo".to_vec(), // ㅓ (Eo, 229) - 230 => b"hangul_e".to_vec(), // ㅔ (E, 230) - 231 => b"hangul_yeo".to_vec(), // ㅕ (Yeo, 231) - 232 => b"hangul_ye".to_vec(), // ㅖ (Ye, 232) - 233 => b"hangul_o".to_vec(), // ㅗ (O, 233) - 234 => b"hangul_wa".to_vec(), // ㅘ (Wa, 234) - 235 => b"hangul_wae".to_vec(), // ㅙ (Wae, 235) - 236 => b"hangul_oe".to_vec(), // ㅚ (Oe, 236) - 237 => b"hangul_yo".to_vec(), // ㅛ (Yo, 237) - 238 => b"hangul_u".to_vec(), // ㅜ (U, 238) - 239 => b"hangul_weo".to_vec(), // ㅝ (Weo, 239) - 240 => b"hangul_we".to_vec(), // ㅞ (We, 240) - 241 => b"hangul_wi".to_vec(), // ㅟ (Wi, 241) - 242 => b"hangul_yu".to_vec(), // ㅠ (Yu, 242) - 243 => b"hangul_eu".to_vec(), // ㅡ (Eu, 243) - 244 => b"hangul_ui".to_vec(), // ㅢ (Ui, 244) - 245 => b"hangul_i".to_vec(), // ㅣ (I, 245) - 246 => b"ethiopic_glottal_a".to_vec(), // አ (Glottal A, 246) - 247 => b"ethiopic_glottal_u".to_vec(), // ኡ (Glottal U, 247) - 248 => b"ethiopic_glottal_i".to_vec(), // ኢ (Glottal I, 248) - 249 => b"ethiopic_glottal_aa".to_vec(), // ኣ (Glottal Aa, 249) - 250 => b"ethiopic_glottal_e".to_vec(), // ኤ (Glottal E, 250) - 251 => b"ethiopic_glottal_ie".to_vec(), // እ (Glottal Ie, 251) - 252 => b"ethiopic_glottal_o".to_vec(), // ኦ (Glottal O, 252) - 253 => b"ethiopic_glottal_wa".to_vec(), // ኧ (Glottal Wa, 253) - 254 => b"ethiopic_wa".to_vec(), // ወ (Wa, 254) - 255 => b"ethiopic_wu".to_vec(), // ዉ (Wu, 255) - 256 => b"ethiopic_wi".to_vec(), // ዊ (Wi, 256) - 257 => b"ethiopic_waa".to_vec(), // ዋ (Waa, 257) - 258 => b"ethiopic_we".to_vec(), // ዌ (We, 258) - 259 => b"ethiopic_wye".to_vec(), // ው (Wye, 259) - 
260 => b"ethiopic_wo".to_vec(), // ዎ (Wo, 260) - 261 => b"ethiopic_ko".to_vec(), // ኰ (Ko, 261) - 262 => b"ethiopic_ku".to_vec(), // ኱ (Ku, 262) - 263 => b"ethiopic_ki".to_vec(), // ኲ (Ki, 263) - 264 => b"ethiopic_kua".to_vec(), // ኳ (Kua, 264) - 265 => b"ethiopic_ke".to_vec(), // ኴ (Ke, 265) - 266 => b"ethiopic_kwe".to_vec(), // ኵ (Kwe, 266) - 267 => b"ethiopic_ko_alt".to_vec(), // ኶ (Ko, 267) - 268 => b"ethiopic_go".to_vec(), // ጐ (Go, 268) - 269 => b"ethiopic_gu".to_vec(), // ጑ (Gu, 269) - 270 => b"ethiopic_gi".to_vec(), // ጒ (Gi, 270) - 271 => b"ethiopic_gua".to_vec(), // መ (Gua, 271) - 272 => b"ethiopic_ge".to_vec(), // ጔ (Ge, 272) - 273 => b"ethiopic_gwe".to_vec(), // ጕ (Gwe, 273) - 274 => b"ethiopic_go_alt".to_vec(), // ጖ (Go, 274) - 275 => b"devanagari_a".to_vec(), // अ (A, 275) - 276 => b"devanagari_aa".to_vec(), // आ (Aa, 276) - 277 => b"devanagari_i".to_vec(), // इ (I, 277) - 278 => b"devanagari_ii".to_vec(), // ई (Ii, 278) - 279 => b"devanagari_u".to_vec(), // उ (U, 279) - 280 => b"devanagari_uu".to_vec(), // ऊ (Uu, 280) - 281 => b"devanagari_r".to_vec(), // ऋ (R, 281) - 282 => b"devanagari_e".to_vec(), // ए (E, 282) - 283 => b"devanagari_ai".to_vec(), // ऐ (Ai, 283) - 284 => b"devanagari_o".to_vec(), // ओ (O, 284) - 285 => b"devanagari_au".to_vec(), // औ (Au, 285) - 286 => b"devanagari_ka".to_vec(), // क (Ka, 286) - 287 => b"devanagari_kha".to_vec(), // ख (Kha, 287) - 288 => b"devanagari_ga".to_vec(), // ग (Ga, 288) - 289 => b"devanagari_gha".to_vec(), // घ (Gha, 289) - 290 => b"devanagari_nga".to_vec(), // ङ (Nga, 290) - 291 => b"devanagari_cha".to_vec(), // च (Cha, 291) - 292 => b"devanagari_chha".to_vec(), // छ (Chha, 292) - 293 => b"devanagari_ja".to_vec(), // ज (Ja, 293) - 294 => b"devanagari_jha".to_vec(), // झ (Jha, 294) - 295 => b"devanagari_nya".to_vec(), // ञ (Nya, 295) - 296 => b"devanagari_ta".to_vec(), // ट (Ta, 296) - 297 => b"devanagari_tha".to_vec(), // ठ (Tha, 297) - 298 => b"devanagari_da".to_vec(), // ड (Da, 298) - 299 => 
b"devanagari_dha".to_vec(), // ढ (Dha, 299) - 300 => b"devanagari_na".to_vec(), // ण (Na, 300) - 301 => b"devanagari_ta_alt".to_vec(), // त (Ta, 301) - 302 => b"devanagari_tha_alt".to_vec(), // थ (Tha, 302) - 303 => b"devanagari_da_alt".to_vec(), // द (Da, 303) - 304 => b"devanagari_dha_alt".to_vec(), // ध (Dha, 304) - 305 => b"devanagari_na_alt".to_vec(), // न (Na, 305) - 306 => b"devanagari_pa".to_vec(), // प (Pa, 306) - 307 => b"devanagari_pha".to_vec(), // फ (Pha, 307) - 308 => b"devanagari_ba".to_vec(), // ब (Ba, 308) - 309 => b"devanagari_bha".to_vec(), // भ (Bha, 309) - 310 => b"devanagari_ma".to_vec(), // म (Ma, 310) - 311 => b"devanagari_ya".to_vec(), // य (Ya, 311) - 312 => b"devanagari_ra".to_vec(), // र (Ra, 312) - 313 => b"devanagari_la".to_vec(), // ल (La, 313) - 314 => b"devanagari_va".to_vec(), // व (Va, 314) - 315 => b"devanagari_sha".to_vec(), // श (Sha, 315) - 316 => b"devanagari_ssa".to_vec(), // ष (Ssa, 316) - 317 => b"devanagari_sa".to_vec(), // स (Sa, 317) - 318 => b"devanagari_ha".to_vec(), // ह (Ha, 318) - 319 => b"katakana_a".to_vec(), // ア (A, 319) - 320 => b"kana_i".to_vec(), - 321 => b"kana_u".to_vec(), - 322 => b"kana_e".to_vec(), - 323 => b"kana_o".to_vec(), - 324 => b"kana_a".to_vec(), - 325 => b"kana_ki".to_vec(), - 326 => b"kana_ku".to_vec(), - 327 => b"kana_ke".to_vec(), - 328 => b"kana_ko".to_vec(), - 329 => b"kana_sa".to_vec(), - 330 => b"kana_shi".to_vec(), - 331 => b"kana_su".to_vec(), - 332 => b"kana_se".to_vec(), - 333 => b"kana_so".to_vec(), - 334 => b"kana_ta".to_vec(), - 335 => b"kana_chi".to_vec(), - 336 => b"kana_tsu".to_vec(), - 337 => b"kana_te".to_vec(), - 338 => b"kana_to".to_vec(), - 339 => b"kana_na".to_vec(), - 340 => b"kana_ni".to_vec(), - 341 => b"kana_nu".to_vec(), - 342 => b"kana_ne".to_vec(), - 343 => b"kana_no".to_vec(), - 344 => b"kana_ha".to_vec(), - 345 => b"kana_hi".to_vec(), - 346 => b"kana_fu".to_vec(), - 347 => b"kana_he".to_vec(), - 348 => b"kana_ho".to_vec(), - 349 => b"kana_ma".to_vec(), - 350 => 
b"kana_mi".to_vec(), - 351 => b"kana_mu".to_vec(), - 352 => b"kana_me".to_vec(), - 353 => b"kana_mo".to_vec(), - 354 => b"kana_ya".to_vec(), - 355 => b"kana_yu".to_vec(), - 356 => b"kana_yo".to_vec(), - 357 => b"kana_ra".to_vec(), - 358 => b"kana_ri".to_vec(), - 359 => b"kana_ru".to_vec(), - 360 => b"kana_re".to_vec(), - 361 => b"kana_ro".to_vec(), - 362 => b"kana_wa".to_vec(), - 363 => b"kana_wo".to_vec(), - 364 => b"kana_n".to_vec(), - 365 => b"ya".to_vec(), - 366 => b"yab".to_vec(), - 367 => b"yabh".to_vec(), - 368 => b"yag".to_vec(), - 369 => b"yagh".to_vec(), - 370 => b"yaj".to_vec(), - 371 => b"yach".to_vec(), - 372 => b"yad".to_vec(), - 373 => b"yadh".to_vec(), - 374 => b"yadhe".to_vec(), - 375 => b"yaz".to_vec(), - 376 => b"yazh".to_vec(), - 377 => b"yaf".to_vec(), - 378 => b"yak".to_vec(), - 379 => b"yakv".to_vec(), - 380 => b"yaq".to_vec(), - 381 => b"yah".to_vec(), - 382 => b"yahh".to_vec(), - 383 => b"yahl".to_vec(), - 384 => b"yahm".to_vec(), - 385 => b"yayn".to_vec(), - 386 => b"yakh".to_vec(), - 387 => b"yakl".to_vec(), - 388 => b"yahq".to_vec(), - 389 => b"yash".to_vec(), - 390 => b"yi".to_vec(), - 391 => b"yij".to_vec(), - 392 => b"yizh".to_vec(), - 393 => b"yink".to_vec(), - 394 => b"yal".to_vec(), - 395 => b"yam".to_vec(), - 396 => b"yan".to_vec(), - 397 => b"yang".to_vec(), - 398 => b"yany".to_vec(), - 399 => b"yap".to_vec(), - 400 => b"yu".to_vec(), - 401 => b"a".to_vec(), - 402 => b"aa".to_vec(), - 403 => b"i".to_vec(), - 404 => b"ii".to_vec(), - 405 => b"u".to_vec(), - 406 => b"uu".to_vec(), - 407 => b"r".to_vec(), - 408 => b"rr".to_vec(), - 409 => b"l".to_vec(), - 410 => b"ll".to_vec(), - 411 => b"e".to_vec(), - 412 => b"ee".to_vec(), - 413 => b"ai".to_vec(), - 414 => b"o".to_vec(), - 415 => b"oo".to_vec(), - 416 => b"au".to_vec(), - 417 => b"ka".to_vec(), - 418 => b"kha".to_vec(), - 419 => b"ga".to_vec(), - 420 => b"gha".to_vec(), - 421 => b"nga".to_vec(), - 422 => b"cha".to_vec(), - 423 => b"chha".to_vec(), - 424 => b"ja".to_vec(), - 425 
=> b"jha".to_vec(), - 426 => b"nya".to_vec(), - 427 => b"ta".to_vec(), - 428 => b"tha".to_vec(), - 429 => b"da".to_vec(), - 430 => b"dha".to_vec(), - 431 => b"na".to_vec(), - 432 => b"pa".to_vec(), - 433 => b"pha".to_vec(), - 434 => b"ba".to_vec(), - 435 => b"bha".to_vec(), - 436 => b"ma".to_vec(), - 437 => b"ya".to_vec(), - 438 => b"ra".to_vec(), - _ => b"unknown".to_vec(), - } - // match netuid { - // // Greek Alphabet (Lowercase) - // 0 => b"root".to_vec(), // Τ (Upper case Tau) - // 1 => b"apex".to_vec(), // α (Alpha) - // 2 => b"omron".to_vec(), // β (Beta) - // 3 => b"templar".to_vec(), // γ (Gamma) - // 4 => b"targon".to_vec(), // δ (Delta) - // 5 => b"kaito".to_vec(), // ε (Epsilon) - // 6 => b"infinite".to_vec(), // ζ (Zeta) - // 7 => b"subvortex".to_vec(), // η (Eta) - // 8 => b"ptn".to_vec(), // θ (Theta) - // 9 => b"pretrain".to_vec(), // ι (Iota) - // 10 => b"sturdy".to_vec(), // κ (Kappa) - // 11 => b"dippy".to_vec(), // λ (Lambda) - // 12 => b"horde".to_vec(), // μ (Mu) - // 13 => b"dataverse".to_vec(), // ν (Nu) - // 14 => b"palaidn".to_vec(), // ξ (Xi) - // 15 => b"deval".to_vec(), // ο (Omicron) - // 16 => b"bitads".to_vec(), // π (Pi) - // 17 => b"3gen".to_vec(), // ρ (Rho) - // 18 => b"cortex".to_vec(), // σ (Sigma) - // 19 => b"inference".to_vec(), // t (Tau) - // 20 => b"bitagent".to_vec(), // υ (Upsilon) - // 21 => b"any-any".to_vec(), // φ (Phi) - // 22 => b"meta".to_vec(), // χ (Chi) - // 23 => b"social".to_vec(), // ψ (Psi) - // 24 => b"omega".to_vec(), // ω (Omega) - // 25 => b"protein".to_vec(), // א (Aleph) - // 26 => b"alchemy".to_vec(), // ב (Bet) - // 27 => b"compute".to_vec(), // ג (Gimel) - // 28 => b"oracle".to_vec(), // ד (Dalet) - // 29 => b"coldint".to_vec(), // ה (He) - // 30 => b"bet".to_vec(), // ו (Vav) - // 31 => b"naschain".to_vec(), // ז (Zayin) - // 32 => b"itsai".to_vec(), // ח (Het) - // 33 => b"ready".to_vec(), // ט (Tet) - // 34 => b"mind".to_vec(), // י (Yod) - // 35 => b"logic".to_vec(), // ך (Final Kaf) - // 36 
=> b"automata".to_vec(), // כ (Kaf) - // 37 => b"tuning".to_vec(), // ל (Lamed) - // 38 => b"distributed".to_vec(), // ם (Final Mem) - // 39 => b"edge".to_vec(), // מ (Mem) - // 40 => b"chunk".to_vec(), // ן (Final Nun) - // 41 => b"sportsensor".to_vec(), // נ (Nun) - // 42 => b"masa".to_vec(), // ס (Samekh) - // 43 => b"graphite".to_vec(), // ע (Ayin) - // 44 => b"score".to_vec(), // ף (Final Pe) - // 45 => b"gen42".to_vec(), // פ (Pe) - // 46 => b"neural".to_vec(), // ץ (Final Tsadi) - // 47 => b"condense".to_vec(), // צ (Tsadi) - // 48 => b"nextplace".to_vec(), // ק (Qof) - // 49 => b"automl".to_vec(), // ר (Resh) - // 50 => b"audio".to_vec(), // ש (Shin) - // 51 => b"celium".to_vec(), // ת (Tav) - // 52 => b"dojo".to_vec(), // ا (Alif) - // 53 => b"frontier".to_vec(), // ب (Ba) - // 54 => b"safescan".to_vec(), // ت (Ta) - // 55 => b"unknown".to_vec(), // ث (Tha) - // 56 => b"gradients".to_vec(), // ج (Jim) - // 57 => b"gaia".to_vec(), // ح (Ha) - // 58 => b"dippy-speach".to_vec(), // خ (Kha) - // 59 => b"agent-arena".to_vec(), // د (Dal) - // 60 => b"unknown".to_vec(), // ذ (Dhal) - // 61 => b"red team".to_vec(), // ر (Ra) - // 62 => b"agentao".to_vec(), // ز (Zay) - // 63 => b"lean-in".to_vec(), // س (Sin) - // 64 => b"chutes".to_vec(), // ش (Shin) - // // Default case - // _ => b"unknown".to_vec(), // unknown subnet. 
- // } + SubnetIdentitiesV2::::try_get(netuid) + .and_then(|identity| { + if !identity.subnet_name.is_empty() { + Ok(identity.subnet_name) + } else { + Err(()) + } + }) + .unwrap_or_else(|_| { + match netuid { + 0 => b"root".to_vec(), // Τ (Upper case Tau) + 1 => b"apex".to_vec(), // α (Alpha) + 2 => b"omron".to_vec(), // β (Beta) + 3 => b"templar".to_vec(), // γ (Gamma) + 4 => b"targon".to_vec(), // δ (Delta) + 5 => b"kaito".to_vec(), // ε (Epsilon) + 6 => b"infinite".to_vec(), // ζ (Zeta) + 7 => b"subvortex".to_vec(), // η (Eta) + 8 => b"ptn".to_vec(), // θ (Theta) + 9 => b"pretrain".to_vec(), // ι (Iota) + 10 => b"sturdy".to_vec(), // κ (Kappa) + 11 => b"dippy".to_vec(), // λ (Lambda) + 12 => b"horde".to_vec(), // μ (Mu) + 13 => b"dataverse".to_vec(), // ν (Nu) + 14 => b"palaidn".to_vec(), // ξ (Xi) + 15 => b"deval".to_vec(), // ο (Omicron) + 16 => b"bitads".to_vec(), // π (Pi) + 17 => b"3gen".to_vec(), // ρ (Rho) + 18 => b"cortex".to_vec(), // σ (Sigma) + 19 => b"inference".to_vec(), // t (Tau) + 20 => b"bitagent".to_vec(), // υ (Upsilon) + 21 => b"any-any".to_vec(), // φ (Phi) + 22 => b"meta".to_vec(), // χ (Chi) + 23 => b"social".to_vec(), // ψ (Psi) + 24 => b"omega".to_vec(), // ω (Omega) + 25 => b"protein".to_vec(), // א (Aleph) + 26 => b"alchemy".to_vec(), // ב (Bet) + 27 => b"compute".to_vec(), // ג (Gimel) + 28 => b"oracle".to_vec(), // ד (Dalet) + 29 => b"coldint".to_vec(), // ה (He) + 30 => b"bet".to_vec(), // ו (Vav) + 31 => b"naschain".to_vec(), // ז (Zayin) + 32 => b"itsai".to_vec(), // ח (Het) + 33 => b"ready".to_vec(), // ט (Tet) + 34 => b"mind".to_vec(), // י (Yod) + 35 => b"logic".to_vec(), // ך (Final Kaf) + 36 => b"automata".to_vec(), // כ (Kaf) + 37 => b"tuning".to_vec(), // ל (Lamed) + 38 => b"distributed".to_vec(), // ם (Final Mem) + 39 => b"edge".to_vec(), // מ (Mem) + 40 => b"chunk".to_vec(), // ן (Final Nun) + 41 => b"sportsensor".to_vec(), // נ (Nun) + 42 => b"masa".to_vec(), // ס (Samekh) + 43 => b"graphite".to_vec(), // ע (Ayin) + 44 
=> b"score".to_vec(), // ף (Final Pe) + 45 => b"gen42".to_vec(), // פ (Pe) + 46 => b"neural".to_vec(), // ץ (Final Tsadi) + 47 => b"condense".to_vec(), // צ (Tsadi) + 48 => b"nextplace".to_vec(), // ק (Qof) + 49 => b"automl".to_vec(), // ר (Resh) + 50 => b"audio".to_vec(), // ש (Shin) + 51 => b"celium".to_vec(), // ת (Tav) + 52 => b"dojo".to_vec(), // ا (Alif) + 53 => b"frontier".to_vec(), // ب (Ba) + 54 => b"safescan".to_vec(), // ت (Ta) + 55 => b"unknown".to_vec(), // ث (Tha) + 56 => b"gradients".to_vec(), // ج (Jim) + 57 => b"gaia".to_vec(), // ح (Ha) + 58 => b"dippy-speach".to_vec(), // خ (Kha) + 59 => b"agent-arena".to_vec(), // د (Dal) + 60 => b"unknown".to_vec(), // ذ (Dhal) + 61 => b"red team".to_vec(), // ر (Ra) + 62 => b"agentao".to_vec(), // ز (Zay) + 63 => b"lean-in".to_vec(), // س (Sin) + 64 => b"chutes".to_vec(), // ش (Shin) + 65 => b"sad".to_vec(), + 66 => b"dad".to_vec(), + 67 => b"ta".to_vec(), + 68 => b"dha".to_vec(), + 69 => b"ain".to_vec(), + 70 => b"ghayn".to_vec(), + 71 => b"fa".to_vec(), + 72 => b"qaf".to_vec(), + 73 => b"kaf".to_vec(), + 74 => b"lam".to_vec(), + 75 => b"mim".to_vec(), + 76 => b"nun".to_vec(), + 77 => b"ha".to_vec(), + 78 => b"waw".to_vec(), + 79 => b"ya".to_vec(), + 80 => b"alef".to_vec(), + 81 => b"fehu".to_vec(), + 82 => b"uruz".to_vec(), + 83 => b"thurisaz".to_vec(), + 84 => b"ansuz".to_vec(), + 85 => b"raidho".to_vec(), + 86 => b"kaunan".to_vec(), + 87 => b"cyr_yeru".to_vec(), + 88 => b"algiz".to_vec(), + 89 => b"berkanan".to_vec(), + 90 => b"ogham".to_vec(), + 91 => b"beith".to_vec(), + 92 => b"luis".to_vec(), + 93 => b"fearn".to_vec(), + 94 => b"sail".to_vec(), + 95 => b"nion".to_vec(), + 96 => b"forfeda".to_vec(), + 97 => b"ani".to_vec(), + 98 => b"bani".to_vec(), + 99 => b"gani".to_vec(), + 100 => b"doni".to_vec(), + 101 => b"eni".to_vec(), + 102 => b"vini".to_vec(), + 103 => b"ayp".to_vec(), + 104 => b"ben".to_vec(), + 105 => b"gim".to_vec(), + 106 => b"da".to_vec(), + 107 => b"ech".to_vec(), + 108 => 
b"za".to_vec(), + 109 => b"armeni".to_vec(), + 110 => b"grave".to_vec(), + 111 => b"io".to_vec(), + 112 => b"dje".to_vec(), + 113 => b"gje".to_vec(), + 114 => b"ie".to_vec(), + 115 => b"dze".to_vec(), + 116 => b"hard_sign".to_vec(), + 117 => b"alfa".to_vec(), + 118 => b"alfas".to_vec(), + 119 => b"vida".to_vec(), // Ⲃ (Vida, 119) + 120 => b"vida_small".to_vec(), // ⲃ (Small Vida, 120) + 121 => b"gamma".to_vec(), // Ⲅ (Gamma, 121) + 122 => b"gamma_small".to_vec(), // ⲅ (Small Gamma, 122) + 123 => b"brahmi_a".to_vec(), // 𑀀 (A, 123) + 124 => b"brahmi_aa".to_vec(), // 𑀁 (Aa, 124) + 125 => b"brahmi_i".to_vec(), // 𑀂 (I, 125) + 126 => b"brahmi_ii".to_vec(), // 𑀃 (Ii, 126) + 127 => b"brahmi_u".to_vec(), // 𑀅 (U, 127) + 128 => b"la".to_vec(), + 129 => b"va".to_vec(), + 130 => b"sha".to_vec(), + 131 => b"ssa".to_vec(), + 132 => b"sa".to_vec(), + 133 => b"ha".to_vec(), + 134 => b"glagolitic_az".to_vec(), // Ⰰ (Az, 134) + 135 => b"glagolitic_buky".to_vec(), // Ⰱ (Buky, 135) + 136 => b"glagolitic_vede".to_vec(), // Ⰲ (Vede, 136) + 137 => b"glagolitic_glagoli".to_vec(), // Ⰳ (Glagoli, 137) + 138 => b"glagolitic_dobro".to_vec(), // Ⰴ (Dobro, 138) + 139 => b"glagolitic_yest".to_vec(), // Ⰵ (Yest, 139) + 140 => b"glagolitic_zhivete".to_vec(), // Ⰶ (Zhivete, 140) + 141 => b"glagolitic_zemlja".to_vec(), // Ⰷ (Zemlja, 141) + 142 => b"glagolitic_izhe".to_vec(), // Ⰸ (Izhe, 142) + 143 => b"glagolitic_initial_izhe".to_vec(), // Ⰹ (Initial Izhe, 143) + 144 => b"glagolitic_i".to_vec(), // Ⰺ (I, 144) + 145 => b"glagolitic_djerv".to_vec(), // Ⰻ (Djerv, 145) + 146 => b"glagolitic_kako".to_vec(), // Ⰼ (Kako, 146) + 147 => b"glagolitic_ljudije".to_vec(), // Ⰽ (Ljudije, 147) + 148 => b"glagolitic_myse".to_vec(), // Ⰾ (Myse, 148) + 149 => b"glagolitic_nash".to_vec(), // Ⰿ (Nash, 149) + 150 => b"glagolitic_on".to_vec(), // Ⱀ (On, 150) + 151 => b"glagolitic_pokoj".to_vec(), // Ⱁ (Pokoj, 151) + 152 => b"glagolitic_rtsy".to_vec(), // Ⱂ (Rtsy, 152) + 153 => b"glagolitic_slovo".to_vec(), // Ⱃ (Slovo, 
153) + 154 => b"glagolitic_tvrido".to_vec(), // Ⱄ (Tvrido, 154) + 155 => b"glagolitic_uku".to_vec(), // Ⱅ (Uku, 155) + 156 => b"glagolitic_fert".to_vec(), // Ⱆ (Fert, 156) + 157 => b"glagolitic_xrivi".to_vec(), // Ⱇ (Xrivi, 157) + 158 => b"glagolitic_ot".to_vec(), // Ⱈ (Ot, 158) + 159 => b"glagolitic_cy".to_vec(), // Ⱉ (Cy, 159) + 160 => b"glagolitic_shcha".to_vec(), // Ⱊ (Shcha, 160) + 161 => b"glagolitic_er".to_vec(), // Ⱋ (Er, 161) + 162 => b"glagolitic_yeru".to_vec(), // Ⱌ (Yeru, 162) + 163 => b"glagolitic_small_yer".to_vec(), // Ⱍ (Small Yer, 163) + 164 => b"glagolitic_yo".to_vec(), // Ⱎ (Yo, 164) + 165 => b"glagolitic_yu".to_vec(), // Ⱏ (Yu, 165) + 166 => b"glagolitic_ja".to_vec(), // Ⱐ (Ja, 166) + 167 => b"thai_ko_kai".to_vec(), // ก (Ko Kai, 167) + 168 => b"thai_kho_khai".to_vec(), // ข (Kho Khai, 168) + 169 => b"thai_kho_khuat".to_vec(), // ฃ (Kho Khuat, 169) + 170 => b"thai_kho_khon".to_vec(), // ค (Kho Khon, 170) + 171 => b"thai_kho_rakhang".to_vec(), // ฅ (Kho Rakhang, 171) + 172 => b"thai_kho_khwai".to_vec(), // ฆ (Kho Khwai, 172) + 173 => b"thai_ngo_ngu".to_vec(), // ง (Ngo Ngu, 173) + 174 => b"thai_cho_chan".to_vec(), // จ (Cho Chan, 174) + 175 => b"thai_cho_ching".to_vec(), // ฉ (Cho Ching, 175) + 176 => b"thai_cho_chang".to_vec(), // ช (Cho Chang, 176) + 177 => b"thai_so_so".to_vec(), // ซ (So So, 177) + 178 => b"thai_cho_choe".to_vec(), // ฌ (Cho Choe, 178) + 179 => b"thai_yo_ying".to_vec(), // ญ (Yo Ying, 179) + 180 => b"thai_do_chada".to_vec(), // ฎ (Do Chada, 180) + 181 => b"thai_to_patak".to_vec(), // ฏ (To Patak, 181) + 182 => b"thai_tho_than".to_vec(), // ฐ (Tho Than, 182) + 183 => b"thai_tho_nangmontho".to_vec(), // ฑ (Tho Nangmontho, 183) + 184 => b"thai_tho_phuthao".to_vec(), // ฒ (Tho Phuthao, 184) + 185 => b"thai_no_nen".to_vec(), // ณ (No Nen, 185) + 186 => b"thai_do_dek".to_vec(), // ด (Do Dek, 186) + 187 => b"thai_to_tao".to_vec(), // ต (To Tao, 187) + 188 => b"thai_tho_thung".to_vec(), // ถ (Tho Thung, 188) + 189 => 
b"thai_tho_thahan".to_vec(), // ท (Tho Thahan, 189) + 190 => b"thai_tho_thong".to_vec(), // ธ (Tho Thong, 190) + 191 => b"thai_no_nu".to_vec(), // น (No Nu, 191) + 192 => b"thai_bo_baimai".to_vec(), // บ (Bo Baimai, 192) + 193 => b"thai_po_pla".to_vec(), // ป (Po Pla, 193) + 194 => b"thai_pho_phung".to_vec(), // ผ (Pho Phung, 194) + 195 => b"thai_fo_fa".to_vec(), // ฝ (Fo Fa, 195) + 196 => b"thai_pho_phan".to_vec(), // พ (Pho Phan, 196) + 197 => b"thai_fo_fan".to_vec(), // ฟ (Fo Fan, 197) + 198 => b"thai_pho_samphao".to_vec(), // ภ (Pho Samphao, 198) + 199 => b"thai_mo_ma".to_vec(), // ม (Mo Ma, 199) + 200 => b"thai_yo_yak".to_vec(), // ย (Yo Yak, 200) + 201 => b"thai_ro_rua".to_vec(), // ร (Ro Rua, 201) + 202 => b"thai_lo_ling".to_vec(), // ล (Lo Ling, 202) + 203 => b"thai_wo_waen".to_vec(), // ว (Wo Waen, 203) + 204 => b"thai_so_sala".to_vec(), // ศ (So Sala, 204) + 205 => b"thai_so_rusi".to_vec(), // ษ (So Rusi, 205) + 206 => b"thai_so_sua".to_vec(), // ส (So Sua, 206) + 207 => b"thai_ho_hip".to_vec(), // ห (Ho Hip, 207) + 208 => b"thai_lo_chula".to_vec(), // ฬ (Lo Chula, 208) + 209 => b"thai_o_ang".to_vec(), // อ (O Ang, 209) + 210 => b"thai_ho_nokhuk".to_vec(), // ฮ (Ho Nokhuk, 210) + 211 => b"hangul_giyeok".to_vec(), // ㄱ (Giyeok, 211) + 212 => b"hangul_nieun".to_vec(), // ㄴ (Nieun, 212) + 213 => b"hangul_digeut".to_vec(), // ㄷ (Digeut, 213) + 214 => b"hangul_rieul".to_vec(), // ㄹ (Rieul, 214) + 215 => b"hangul_mieum".to_vec(), // ㅁ (Mieum, 215) + 216 => b"hangul_bieup".to_vec(), // ㅂ (Bieup, 216) + 217 => b"hangul_siot".to_vec(), // ㅅ (Siot, 217) + 218 => b"hangul_ieung".to_vec(), // ㅇ (Ieung, 218) + 219 => b"hangul_jieut".to_vec(), // ㅈ (Jieut, 219) + 220 => b"hangul_chieut".to_vec(), // ㅊ (Chieut, 220) + 221 => b"hangul_kieuk".to_vec(), // ㅋ (Kieuk, 221) + 222 => b"hangul_tieut".to_vec(), // ㅌ (Tieut, 222) + 223 => b"hangul_pieup".to_vec(), // ㅍ (Pieup, 223) + 224 => b"hangul_hieut".to_vec(), // ㅎ (Hieut, 224) + 225 => b"hangul_a".to_vec(), // ㅏ (A, 225) + 
226 => b"hangul_ae".to_vec(), // ㅐ (Ae, 226) + 227 => b"hangul_ya".to_vec(), // ㅑ (Ya, 227) + 228 => b"hangul_yae".to_vec(), // ㅒ (Yae, 228) + 229 => b"hangul_eo".to_vec(), // ㅓ (Eo, 229) + 230 => b"hangul_e".to_vec(), // ㅔ (E, 230) + 231 => b"hangul_yeo".to_vec(), // ㅕ (Yeo, 231) + 232 => b"hangul_ye".to_vec(), // ㅖ (Ye, 232) + 233 => b"hangul_o".to_vec(), // ㅗ (O, 233) + 234 => b"hangul_wa".to_vec(), // ㅘ (Wa, 234) + 235 => b"hangul_wae".to_vec(), // ㅙ (Wae, 235) + 236 => b"hangul_oe".to_vec(), // ㅚ (Oe, 236) + 237 => b"hangul_yo".to_vec(), // ㅛ (Yo, 237) + 238 => b"hangul_u".to_vec(), // ㅜ (U, 238) + 239 => b"hangul_weo".to_vec(), // ㅝ (Weo, 239) + 240 => b"hangul_we".to_vec(), // ㅞ (We, 240) + 241 => b"hangul_wi".to_vec(), // ㅟ (Wi, 241) + 242 => b"hangul_yu".to_vec(), // ㅠ (Yu, 242) + 243 => b"hangul_eu".to_vec(), // ㅡ (Eu, 243) + 244 => b"hangul_ui".to_vec(), // ㅢ (Ui, 244) + 245 => b"hangul_i".to_vec(), // ㅣ (I, 245) + 246 => b"ethiopic_glottal_a".to_vec(), // አ (Glottal A, 246) + 247 => b"ethiopic_glottal_u".to_vec(), // ኡ (Glottal U, 247) + 248 => b"ethiopic_glottal_i".to_vec(), // ኢ (Glottal I, 248) + 249 => b"ethiopic_glottal_aa".to_vec(), // ኣ (Glottal Aa, 249) + 250 => b"ethiopic_glottal_e".to_vec(), // ኤ (Glottal E, 250) + 251 => b"ethiopic_glottal_ie".to_vec(), // እ (Glottal Ie, 251) + 252 => b"ethiopic_glottal_o".to_vec(), // ኦ (Glottal O, 252) + 253 => b"ethiopic_glottal_wa".to_vec(), // ኧ (Glottal Wa, 253) + 254 => b"ethiopic_wa".to_vec(), // ወ (Wa, 254) + 255 => b"ethiopic_wu".to_vec(), // ዉ (Wu, 255) + 256 => b"ethiopic_wi".to_vec(), // ዊ (Wi, 256) + 257 => b"ethiopic_waa".to_vec(), // ዋ (Waa, 257) + 258 => b"ethiopic_we".to_vec(), // ዌ (We, 258) + 259 => b"ethiopic_wye".to_vec(), // ው (Wye, 259) + 260 => b"ethiopic_wo".to_vec(), // ዎ (Wo, 260) + 261 => b"ethiopic_ko".to_vec(), // ኰ (Ko, 261) + 262 => b"ethiopic_ku".to_vec(), // ኱ (Ku, 262) + 263 => b"ethiopic_ki".to_vec(), // ኲ (Ki, 263) + 264 => b"ethiopic_kua".to_vec(), // ኳ (Kua, 264) + 265 
=> b"ethiopic_ke".to_vec(), // ኴ (Ke, 265) + 266 => b"ethiopic_kwe".to_vec(), // ኵ (Kwe, 266) + 267 => b"ethiopic_ko_alt".to_vec(), // ኶ (Ko, 267) + 268 => b"ethiopic_go".to_vec(), // ጐ (Go, 268) + 269 => b"ethiopic_gu".to_vec(), // ጑ (Gu, 269) + 270 => b"ethiopic_gi".to_vec(), // ጒ (Gi, 270) + 271 => b"ethiopic_gua".to_vec(), // መ (Gua, 271) + 272 => b"ethiopic_ge".to_vec(), // ጔ (Ge, 272) + 273 => b"ethiopic_gwe".to_vec(), // ጕ (Gwe, 273) + 274 => b"ethiopic_go_alt".to_vec(), // ጖ (Go, 274) + 275 => b"devanagari_a".to_vec(), // अ (A, 275) + 276 => b"devanagari_aa".to_vec(), // आ (Aa, 276) + 277 => b"devanagari_i".to_vec(), // इ (I, 277) + 278 => b"devanagari_ii".to_vec(), // ई (Ii, 278) + 279 => b"devanagari_u".to_vec(), // उ (U, 279) + 280 => b"devanagari_uu".to_vec(), // ऊ (Uu, 280) + 281 => b"devanagari_r".to_vec(), // ऋ (R, 281) + 282 => b"devanagari_e".to_vec(), // ए (E, 282) + 283 => b"devanagari_ai".to_vec(), // ऐ (Ai, 283) + 284 => b"devanagari_o".to_vec(), // ओ (O, 284) + 285 => b"devanagari_au".to_vec(), // औ (Au, 285) + 286 => b"devanagari_ka".to_vec(), // क (Ka, 286) + 287 => b"devanagari_kha".to_vec(), // ख (Kha, 287) + 288 => b"devanagari_ga".to_vec(), // ग (Ga, 288) + 289 => b"devanagari_gha".to_vec(), // घ (Gha, 289) + 290 => b"devanagari_nga".to_vec(), // ङ (Nga, 290) + 291 => b"devanagari_cha".to_vec(), // च (Cha, 291) + 292 => b"devanagari_chha".to_vec(), // छ (Chha, 292) + 293 => b"devanagari_ja".to_vec(), // ज (Ja, 293) + 294 => b"devanagari_jha".to_vec(), // झ (Jha, 294) + 295 => b"devanagari_nya".to_vec(), // ञ (Nya, 295) + 296 => b"devanagari_ta".to_vec(), // ट (Ta, 296) + 297 => b"devanagari_tha".to_vec(), // ठ (Tha, 297) + 298 => b"devanagari_da".to_vec(), // ड (Da, 298) + 299 => b"devanagari_dha".to_vec(), // ढ (Dha, 299) + 300 => b"devanagari_na".to_vec(), // ण (Na, 300) + 301 => b"devanagari_ta_alt".to_vec(), // त (Ta, 301) + 302 => b"devanagari_tha_alt".to_vec(), // थ (Tha, 302) + 303 => b"devanagari_da_alt".to_vec(), // द (Da, 303) 
+ 304 => b"devanagari_dha_alt".to_vec(), // ध (Dha, 304) + 305 => b"devanagari_na_alt".to_vec(), // न (Na, 305) + 306 => b"devanagari_pa".to_vec(), // प (Pa, 306) + 307 => b"devanagari_pha".to_vec(), // फ (Pha, 307) + 308 => b"devanagari_ba".to_vec(), // ब (Ba, 308) + 309 => b"devanagari_bha".to_vec(), // भ (Bha, 309) + 310 => b"devanagari_ma".to_vec(), // म (Ma, 310) + 311 => b"devanagari_ya".to_vec(), // य (Ya, 311) + 312 => b"devanagari_ra".to_vec(), // र (Ra, 312) + 313 => b"devanagari_la".to_vec(), // ल (La, 313) + 314 => b"devanagari_va".to_vec(), // व (Va, 314) + 315 => b"devanagari_sha".to_vec(), // श (Sha, 315) + 316 => b"devanagari_ssa".to_vec(), // ष (Ssa, 316) + 317 => b"devanagari_sa".to_vec(), // स (Sa, 317) + 318 => b"devanagari_ha".to_vec(), // ह (Ha, 318) + 319 => b"katakana_a".to_vec(), // ア (A, 319) + 320 => b"kana_i".to_vec(), + 321 => b"kana_u".to_vec(), + 322 => b"kana_e".to_vec(), + 323 => b"kana_o".to_vec(), + 324 => b"kana_a".to_vec(), + 325 => b"kana_ki".to_vec(), + 326 => b"kana_ku".to_vec(), + 327 => b"kana_ke".to_vec(), + 328 => b"kana_ko".to_vec(), + 329 => b"kana_sa".to_vec(), + 330 => b"kana_shi".to_vec(), + 331 => b"kana_su".to_vec(), + 332 => b"kana_se".to_vec(), + 333 => b"kana_so".to_vec(), + 334 => b"kana_ta".to_vec(), + 335 => b"kana_chi".to_vec(), + 336 => b"kana_tsu".to_vec(), + 337 => b"kana_te".to_vec(), + 338 => b"kana_to".to_vec(), + 339 => b"kana_na".to_vec(), + 340 => b"kana_ni".to_vec(), + 341 => b"kana_nu".to_vec(), + 342 => b"kana_ne".to_vec(), + 343 => b"kana_no".to_vec(), + 344 => b"kana_ha".to_vec(), + 345 => b"kana_hi".to_vec(), + 346 => b"kana_fu".to_vec(), + 347 => b"kana_he".to_vec(), + 348 => b"kana_ho".to_vec(), + 349 => b"kana_ma".to_vec(), + 350 => b"kana_mi".to_vec(), + 351 => b"kana_mu".to_vec(), + 352 => b"kana_me".to_vec(), + 353 => b"kana_mo".to_vec(), + 354 => b"kana_ya".to_vec(), + 355 => b"kana_yu".to_vec(), + 356 => b"kana_yo".to_vec(), + 357 => b"kana_ra".to_vec(), + 358 => b"kana_ri".to_vec(), + 
359 => b"kana_ru".to_vec(), + 360 => b"kana_re".to_vec(), + 361 => b"kana_ro".to_vec(), + 362 => b"kana_wa".to_vec(), + 363 => b"kana_wo".to_vec(), + 364 => b"kana_n".to_vec(), + 365 => b"ya".to_vec(), + 366 => b"yab".to_vec(), + 367 => b"yabh".to_vec(), + 368 => b"yag".to_vec(), + 369 => b"yagh".to_vec(), + 370 => b"yaj".to_vec(), + 371 => b"yach".to_vec(), + 372 => b"yad".to_vec(), + 373 => b"yadh".to_vec(), + 374 => b"yadhe".to_vec(), + 375 => b"yaz".to_vec(), + 376 => b"yazh".to_vec(), + 377 => b"yaf".to_vec(), + 378 => b"yak".to_vec(), + 379 => b"yakv".to_vec(), + 380 => b"yaq".to_vec(), + 381 => b"yah".to_vec(), + 382 => b"yahh".to_vec(), + 383 => b"yahl".to_vec(), + 384 => b"yahm".to_vec(), + 385 => b"yayn".to_vec(), + 386 => b"yakh".to_vec(), + 387 => b"yakl".to_vec(), + 388 => b"yahq".to_vec(), + 389 => b"yash".to_vec(), + 390 => b"yi".to_vec(), + 391 => b"yij".to_vec(), + 392 => b"yizh".to_vec(), + 393 => b"yink".to_vec(), + 394 => b"yal".to_vec(), + 395 => b"yam".to_vec(), + 396 => b"yan".to_vec(), + 397 => b"yang".to_vec(), + 398 => b"yany".to_vec(), + 399 => b"yap".to_vec(), + 400 => b"yu".to_vec(), + 401 => b"a".to_vec(), + 402 => b"aa".to_vec(), + 403 => b"i".to_vec(), + 404 => b"ii".to_vec(), + 405 => b"u".to_vec(), + 406 => b"uu".to_vec(), + 407 => b"r".to_vec(), + 408 => b"rr".to_vec(), + 409 => b"l".to_vec(), + 410 => b"ll".to_vec(), + 411 => b"e".to_vec(), + 412 => b"ee".to_vec(), + 413 => b"ai".to_vec(), + 414 => b"o".to_vec(), + 415 => b"oo".to_vec(), + 416 => b"au".to_vec(), + 417 => b"ka".to_vec(), + 418 => b"kha".to_vec(), + 419 => b"ga".to_vec(), + 420 => b"gha".to_vec(), + 421 => b"nga".to_vec(), + 422 => b"cha".to_vec(), + 423 => b"chha".to_vec(), + 424 => b"ja".to_vec(), + 425 => b"jha".to_vec(), + 426 => b"nya".to_vec(), + 427 => b"ta".to_vec(), + 428 => b"tha".to_vec(), + 429 => b"da".to_vec(), + 430 => b"dha".to_vec(), + 431 => b"na".to_vec(), + 432 => b"pa".to_vec(), + 433 => b"pha".to_vec(), + 434 => b"ba".to_vec(), + 435 => 
b"bha".to_vec(), + 436 => b"ma".to_vec(), + 437 => b"ya".to_vec(), + 438 => b"ra".to_vec(), + _ => b"unknown".to_vec(), + } + }) } pub fn get_symbol_for_subnet(netuid: u16) -> Vec { diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs new file mode 100644 index 0000000000..e1db49203e --- /dev/null +++ b/pallets/subtensor/src/tests/consensus.rs @@ -0,0 +1,547 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +use super::mock::*; +use crate::*; + +use frame_support::assert_ok; +use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; +use sp_core::U256; +use std::time::Instant; +use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; +use substrate_fixed::types::{I32F32, I64F64}; + +pub fn fixed(val: f32) -> I32F32 { + I32F32::from_num(val) +} + +pub fn fixed_to_u16(x: I32F32) -> u16 { + x.to_num::() +} + +pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { + fixed_to_u16(x * I32F32::from_num(u16::MAX)) +} + +// Normalizes (sum to 1 except 0) the input vector directly in-place. +#[allow(dead_code)] +pub fn inplace_normalize(x: &mut [I32F32]) { + let x_sum: I32F32 = x.iter().sum(); + if x_sum == I32F32::from_num(0.0_f32) { + return; + } + for i in x.iter_mut() { + *i /= x_sum; + } +} + +// Inplace normalize the passed positive integer weights so that they sum to u16 max value. +fn normalize_weights(mut weights: Vec) -> Vec { + let sum: u64 = weights.iter().map(|x| *x as u64).sum(); + if sum == 0 { + return weights; + } + weights.iter_mut().for_each(|x| { + *x = (*x as u64 * u16::MAX as u64 / sum) as u16; + }); + weights +} + +// Return as usize an I32F32 ratio of a usize input, avoiding the 0% and 100% extremes. 
+fn non_extreme_fixed_ratio(ratio: I32F32, total: usize) -> usize { + if total == 0 { + return total; + } + let mut subset: usize = (ratio * I32F32::from_num(total)).to_num::(); + if subset == 0 { + subset = 1; + } else if subset == total { + subset = total - 1; + } + subset +} + +// Box-Muller Transform converting two uniform random samples to a normal random sample. +fn normal(size: usize, rng: &mut StdRng, dist: &Uniform) -> Vec { + let max: I32F32 = I32F32::from_num(u16::MAX); + let two: I32F32 = I32F32::from_num(2); + let eps: I32F32 = I32F32::from_num(0.000001); + let pi: I32F32 = I32F32::from_num(PI); + + let uniform_u16: Vec = (0..(2 * size)).map(|_| rng.sample(dist)).collect(); + let uniform: Vec = uniform_u16 + .iter() + .map(|&x| I32F32::from_num(x) / max) + .collect(); + let mut normal: Vec = vec![I32F32::from_num(0); size]; + + for i in 0..size { + let u1: I32F32 = uniform[i] + eps; + let u2: I32F32 = uniform[i + size] + eps; + normal[i] = sqrt::(-two * ln::(u1).expect("")).expect("") + * cos(two * pi * u2); + } + normal +} + +// Returns validators and servers uids with either blockwise, regular, or random interleaving. 
+fn distribute_nodes( + validators_n: usize, + network_n: usize, + interleave: usize, +) -> (Vec, Vec) { + let mut validators: Vec = vec![]; + let mut servers: Vec = vec![]; + + if interleave == 0 { + // blockwise [validator_block, server_block] + validators = (0..validators_n as u16).collect(); + servers = (validators_n as u16..network_n as u16).collect(); + } else if interleave == 1 { + // regular interleaving [val, srv, srv, ..., srv, val, srv, srv, ..., srv, val, srv, ..., srv] + (validators, servers) = (0..network_n as u16) + .collect::>() + .iter() + .partition(|&i| *i as usize % (network_n / validators_n) == 0); + } else if interleave == 2 { + // random interleaving + let mut permuted_uids: Vec = (0..network_n as u16).collect(); + permuted_uids.shuffle(&mut thread_rng()); + validators = permuted_uids[0..validators_n].into(); + servers = permuted_uids[validators_n..network_n].into(); + } + + (validators, servers) +} + +#[allow(dead_code)] +fn uid_stats(netuid: u16, uid: u16) { + log::info!( + "stake: {:?}", + SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))) + ); + log::info!("rank: {:?}", SubtensorModule::get_rank_for_uid(netuid, uid)); + log::info!( + "trust: {:?}", + SubtensorModule::get_trust_for_uid(netuid, uid) + ); + log::info!( + "consensus: {:?}", + SubtensorModule::get_consensus_for_uid(netuid, uid) + ); + log::info!( + "incentive: {:?}", + SubtensorModule::get_incentive_for_uid(netuid, uid) + ); + log::info!( + "dividend: {:?}", + SubtensorModule::get_dividends_for_uid(netuid, uid) + ); + log::info!( + "emission: {:?}", + SubtensorModule::get_emission_for_uid(netuid, uid) + ); +} + +#[allow(clippy::too_many_arguments)] +fn init_run_epochs( + netuid: u16, + n: u16, + validators: &[u16], + servers: &[u16], + epochs: u16, + stake_per_validator: u64, + server_self: bool, + input_stake: &[u64], + use_input_stake: bool, + input_weights: &[Vec<(u16, u16)>], + use_input_weights: bool, + random_weights: bool, + random_seed: u64, + sparse: 
bool, + bonds_penalty: u16, +) { + // === Create the network + add_network(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead + + // === Set bonds penalty + SubtensorModule::set_bonds_penalty(netuid, bonds_penalty); + + // === Register uids + SubtensorModule::set_max_allowed_uids(netuid, n); + for key in 0..n { + let stake = if use_input_stake { + input_stake[key as usize] + } else if validators.contains(&key) { + stake_per_validator + } else { + // only validators receive stake + 0 + }; + + // let stake: u64 = 1; // alternative test: all nodes receive stake, should be same outcome, except stake + SubtensorModule::add_balance_to_coldkey_account(&(U256::from(key)), stake); + SubtensorModule::append_neuron(netuid, &(U256::from(key)), 0); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stake, + ); + } + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, validators.len() as u16); + assert_eq!( + SubtensorModule::get_max_allowed_validators(netuid), + validators.len() as u16 + ); + SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators + run_to_block(1); // run to next block to ensure weights are set on nodes after their registration block + + // === Set weights + let mut rng = StdRng::seed_from_u64(random_seed); // constant seed so weights over multiple runs are equal + let range = Uniform::new(0, u16::MAX); + let mut weights: Vec = vec![u16::MAX / n; servers.len()]; + for uid in validators { + if random_weights { + weights = (0..servers.len()).map(|_| rng.sample(range)).collect(); + weights = normalize_weights(weights); + // assert_eq!(weights.iter().map(|x| *x as u64).sum::(), u16::MAX as u64); // normalized weight sum not always u16::MAX + } + if use_input_weights { + let sparse_weights = input_weights[*uid as 
usize].clone(); + weights = sparse_weights.iter().map(|(_, w)| *w).collect(); + let srvs: Vec = sparse_weights.iter().map(|(s, _)| *s).collect(); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + srvs, + weights.clone(), + 0 + )); + } else { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + servers.to_vec(), + weights.clone(), + 0 + )); + } + } + if server_self { + for uid in servers { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(*uid as u64)), + netuid, + vec![*uid], + vec![u16::MAX], + 0 + )); // server self-weight + } + } + + // === Run the epochs. + log::info!("Start {epochs} epoch(s)"); + let start = Instant::now(); + for _ in 0..epochs { + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + } + let duration = start.elapsed(); + log::info!( + "Time elapsed in (sparse={sparse}) epoch() is: {:?}", + duration + ); + + // let bonds = SubtensorModule::get_bonds( netuid ); + // for (uid, node) in [ (validators[0], "validator"), (servers[0], "server") ] { + // log::info!("\n{node}" ); + // uid_stats(netuid, uid); + // log::info!("bonds: {:?} (on validator), {:?} (on server)", bonds[uid as usize][0], bonds[uid as usize][servers[0] as usize]); + // } +} + +// Generate a random graph that is split into a major and minor set, each setting specific weight on itself and the complement on the other. 
+fn split_graph( + major_stake: I32F32, + major_weight: I32F32, + minor_weight: I32F32, + weight_stddev: I32F32, + validators_n: usize, + network_n: usize, + interleave: usize, +) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec>, + I32F32, +) { + let servers_n: usize = network_n - validators_n; + let major_servers_n: usize = non_extreme_fixed_ratio(major_stake, servers_n); + let major_validators_n: usize = non_extreme_fixed_ratio(major_stake, validators_n); + + let (validators, servers) = distribute_nodes(validators_n, network_n, interleave); + let major_validators: Vec = (0..major_validators_n).map(|i| validators[i]).collect(); + let minor_validators: Vec = (major_validators_n..validators_n) + .map(|i| validators[i]) + .collect(); + let major_servers: Vec = (0..major_servers_n).map(|i| servers[i]).collect(); + let minor_servers: Vec = (major_servers_n..servers_n).map(|i| servers[i]).collect(); + + let zero: I32F32 = I32F32::from_num(0); + let one: I32F32 = I32F32::from_num(1); + let stddev: I32F32 = I32F32::from_num(0.3); + let total_stake: I64F64 = I64F64::from_num(21_000_000_000_000_000_u64); + let mut rng = StdRng::seed_from_u64(0); // constant seed so weights over multiple runs are equal + let dist = Uniform::new(0, u16::MAX); + + let mut stake: Vec = vec![0; network_n]; + let mut stake_fixed: Vec = vec![zero; network_n]; + for (ratio, vals) in [ + (major_stake, &major_validators), + (one - major_stake, &minor_validators), + ] { + let mut sample: Vec = normal(vals.len(), &mut rng, &dist) + .iter() + .map(|x: &I32F32| { + let v: I32F32 = (stddev * x) + one; + if v < zero { zero } else { v } + }) + .collect(); + inplace_normalize(&mut sample); + for (i, &val) in vals.iter().enumerate() { + stake[val as usize] = + (I64F64::from_num(ratio) * I64F64::from_num(sample[i]) * total_stake) + .to_num::(); + stake_fixed[val as usize] = + I32F32::from_num(I64F64::from_num(ratio) * I64F64::from_num(sample[i])); + } + } + + let mut weights: Vec> = vec![vec![]; 
network_n]; + let mut weights_fixed: Vec> = vec![vec![zero; network_n]; network_n]; + for (first, second, vals) in [ + (major_weight, one - major_weight, &major_validators), + (one - minor_weight, minor_weight, &minor_validators), + ] { + for &val in vals { + for (weight, srvs) in [(first, &major_servers), (second, &minor_servers)] { + let mut sample: Vec = normal(srvs.len(), &mut rng, &dist) + .iter() + .map(|x: &I32F32| { + let v: I32F32 = (weight_stddev * x) + one; + if v < zero { zero } else { v } + }) + .collect(); + inplace_normalize(&mut sample); + + for (i, &srv) in srvs.iter().enumerate() { + weights[val as usize].push((srv, fixed_proportion_to_u16(weight * sample[i]))); + weights_fixed[val as usize][srv as usize] = weight * sample[i]; + } + } + inplace_normalize(&mut weights_fixed[val as usize]); + } + } + + inplace_normalize(&mut stake_fixed); + + // Calculate stake-weighted mean per server + let mut weight_mean: Vec = vec![zero; network_n]; + for val in 0..network_n { + if stake_fixed[val] > zero { + for (srv, weight_mean_row) in weight_mean.iter_mut().enumerate().take(network_n) { + *weight_mean_row += stake_fixed[val] * weights_fixed[val][srv]; + } + } + } + + // Calculate stake-weighted absolute standard deviation + let mut weight_dev: Vec = vec![zero; network_n]; + for val in 0..network_n { + if stake_fixed[val] > zero { + for srv in 0..network_n { + weight_dev[srv] += + stake_fixed[val] * (weight_mean[srv] - weights_fixed[val][srv]).abs(); + } + } + } + + // Calculate rank-weighted mean of weight_dev + let avg_weight_dev: I32F32 = + weight_dev.iter().sum::() / weight_mean.iter().sum::(); + + ( + validators, + servers, + major_validators, + minor_validators, + major_servers, + minor_servers, + stake, + weights, + avg_weight_dev, + ) +} + +// Test consensus guarantees with an epoch on a graph with 4096 nodes, of which the first 128 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement 
on the other. Asserts that the major emission ratio >= major stake ratio. +// #[test] +// fn test_consensus_guarantees() { +// let netuid: u16 = 0; +// let network_n: u16 = 512; +// let validators_n: u16 = 64; +// let epochs: u16 = 1; +// let interleave = 2; +// log::info!("test_consensus_guarantees ({network_n:?}, {validators_n:?} validators)"); +// for (major_stake, major_weight, minor_weight, weight_stddev) in [ +// (0.51, 1., 1., 0.001), +// (0.51, 0.03, 0., 0.001), +// (0.51, 0.51, 0.49, 0.001), +// (0.51, 0.51, 1., 0.001), +// (0.51, 0.61, 0.8, 0.1), +// (0.6, 0.67, 0.65, 0.2), +// (0.6, 0.74, 0.77, 0.4), +// (0.6, 0.76, 0.8, 0.4), +// (0.6, 0.76, 1., 0.4), +// (0.6, 0.92, 1., 0.4), +// (0.6, 0.94, 1., 0.4), +// (0.65, 0.78, 0.85, 0.6), +// (0.7, 0.81, 0.85, 0.8), +// (0.7, 0.83, 0.85, 1.), +// ] { +// let ( +// validators, +// servers, +// major_validators, +// minor_validators, +// major_servers, +// minor_servers, +// stake, +// weights, +// _avg_weight_dev, +// ) = split_graph( +// fixed(major_stake), +// fixed(major_weight), +// fixed(minor_weight), +// fixed(weight_stddev), +// validators_n as usize, +// network_n as usize, +// interleave as usize, +// ); + +// new_test_ext(1).execute_with(|| { +// init_run_epochs( +// netuid, +// network_n, +// &validators, +// &servers, +// epochs, +// 1, +// true, +// &stake, +// true, +// &weights, +// true, +// false, +// 0, +// false, +// ); + +// let mut major_emission: I64F64 = I64F64::from_num(0); +// let mut minor_emission: I64F64 = I64F64::from_num(0); +// for set in [major_validators, major_servers] { +// for uid in set { +// major_emission += +// I64F64::from_num(SubtensorModule::get_emission_for_uid(netuid, uid)); +// } +// } +// for set in [minor_validators, minor_servers] { +// for uid in set { +// minor_emission += +// I64F64::from_num(SubtensorModule::get_emission_for_uid(netuid, uid)); +// } +// } +// let major_ratio: I32F32 = +// I32F32::from_num(major_emission / (major_emission + minor_emission)); 
+// assert!(major_stake <= major_ratio); +// }); +// } +// } + +// Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, of which the first 64 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement on the other. +#[test] +#[ignore] // Not an automated test! +fn map_consensus_guarantees() { + let netuid: u16 = 1; + let network_n: u16 = 512; + let validators_n: u16 = 64; + let epochs: u16 = 1; + let interleave = 0; + let weight_stddev: I32F32 = fixed(0.4); + let bonds_penalty: u16 = + (std::env::args().nth(2).unwrap().parse::().unwrap() * f32::from(u16::MAX - 1)) as u16; + println!("["); + for _major_stake in [0.51, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99] { + let major_stake: I32F32 = I32F32::from_num(_major_stake); + for _major_weight in 0..51 { + let major_weight: I32F32 = I32F32::from_num(50 - _major_weight) / I32F32::from_num(50); + for _minor_weight in 0..51 { + let minor_weight: I32F32 = + I32F32::from_num(50 - _minor_weight) / I32F32::from_num(50); + let ( + validators, + servers, + major_validators, + minor_validators, + major_servers, + minor_servers, + stake, + weights, + avg_weight_dev, + ) = split_graph( + major_stake, + major_weight, + minor_weight, + weight_stddev, + validators_n as usize, + network_n as usize, + interleave as usize, + ); + + new_test_ext(1).execute_with(|| { + init_run_epochs(netuid, network_n, &validators, &servers, epochs, 1, true, &stake, true, &weights, true, false, 0, true, bonds_penalty); + + let mut major_emission: I64F64 = I64F64::from_num(0); + let mut minor_emission: I64F64 = I64F64::from_num(0); + for set in [major_validators, major_servers] { + for uid in set { + major_emission += I64F64::from_num(SubtensorModule::get_emission_for_uid( netuid, uid )); + } + } + for set in [minor_validators, minor_servers] { + for uid in set { + minor_emission += I64F64::from_num(SubtensorModule::get_emission_for_uid( 
netuid, uid )); + } + } + let major_ratio: I32F32 = I32F32::from_num(major_emission / (major_emission + minor_emission)); + println!("[{major_stake}, {major_weight:.2}, {minor_weight:.2}, {avg_weight_dev:.3}, {major_ratio:.3}], "); + }); + } + } + } + println!("]"); +} diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index e4b2f02574..a6e688625d 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -5,7 +5,7 @@ )] use super::mock::*; -use crate::epoch::math::safe_exp; +use crate::epoch::math::{fixed, u16_proportion_to_fixed}; use crate::*; use approx::assert_abs_diff_eq; @@ -983,7 +983,7 @@ fn test_512_graph_random_weights() { // }); // } -// Test bonds exponential moving average over a sequence of epochs. +// Test bonds exponential moving average over a sequence of epochs - no liquid alpha #[test] fn test_bonds() { new_test_ext(1).execute_with(|| { @@ -1287,223 +1287,6 @@ fn test_bonds() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_512_graph_random_weights --exact --show-output --nocapture -#[test] -fn test_bonds_with_liquid_alpha() { - new_test_ext(1).execute_with(|| { - let sparse: bool = true; - let n: u16 = 8; - let netuid: u16 = 1; - let tempo: u16 = 1; - let max_stake: u64 = 4; - let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; - let block_number = System::block_number(); - add_network(netuid, tempo, 0); - SubtensorModule::set_max_allowed_uids(netuid, n); - SubtensorModule::set_max_registrations_per_block(netuid, n); - SubtensorModule::set_target_registrations_per_interval(netuid, n); - SubtensorModule::set_weights_set_rate_limit(netuid, 0); - SubtensorModule::set_min_allowed_weights(netuid, 1); - SubtensorModule::set_max_weight_limit(netuid, u16::MAX); - - // Register validators and servers - for key in 0..n as u64 { - SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); - let (nonce, 
work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - key * 1_000_000, - &U256::from(key), - ); - assert_ok!(SubtensorModule::register( - RuntimeOrigin::signed(U256::from(key)), - netuid, - block_number, - nonce, - work, - U256::from(key), - U256::from(key) - )); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( - &U256::from(key), - &U256::from(key), - netuid, - stakes[key as usize], - ); - } - - // Initilize with first epoch - SubtensorModule::epoch(netuid, 1_000_000_000); - next_block_no_epoch(netuid); - - // Set weights - for uid in 0..(n / 2) { - SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - ((n / 2)..n).collect(), - vec![u16::MAX / 4, u16::MAX / 2, (u16::MAX / 4) * 3, u16::MAX], - 0 - )); - } - - // Enable Liquid Alpha - SubtensorModule::set_liquid_alpha_enabled(netuid, true); - // Run epoch with Liquid Alpha - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - - // Check bonds and emissions - let bonds = SubtensorModule::get_bonds(netuid); - - /* n: 8 - current_block: 2; activity_cutoff: 5000; - Last update: [1, 1, 1, 1, 0, 0, 0, 0] - activity_cutoff: 5000 - Last update: [2, 2, 2, 2, 1, 1, 1, 1] - Inactive: [false, false, false, false, false, false, false, false] - Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] - hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] - Stake: [1, 2, 3, 4, 0, 0, 0, 0] - Normalised Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - validator_permits: [true, true, true, true, true, true, true, true] - max_allowed_validators: 8 - new_validator_permits: [true, true, true, true, true, true, true, true] - Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - Weights: [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 
32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag+outdate): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (mask+norm): [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Ranks (before): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] - Consensus: [0, 0, 0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] - Weights: [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Validator Trust: [0.9999999995, 0.9999999995, 0.9999999995, 0.9999999995, 0, 0, 0, 0] - Ranks (after): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] - T: [0, 0, 0, 0, 1, 1, 1, 1] - Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0, 
0.0999975582, 0.2000012207, 0.2999926752, 0.4000085455] - B: [[], [], [], [], [], [], [], []] - B (outdatedmask): [[], [], [], [], [], [], [], []] - B (mask+norm): [[], [], [], [], [], [], [], []] - ΔB: [[(4, 0.0099997558), (5, 0.020000122), (6, 0.0299992673), (7, 0.0400008543)], [(4, 0.0199995115), (5, 0.040000244), (6, 0.0599985349), (7, 0.0800017088)], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] - ΔB (norm): [[(4, 0.0999999996), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.299999999), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000013), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] - Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.0499983232), (5, 0.0899999999), (6, 0.0899999994), (7, 0.0899999996)], [(4, 0.0999966469), (5, 0.18), (6, 0.1799999997), (7, 0.1799999997)], [(4, 0.1499949703), (5, 0.2699999998), (6, 0.2699999998), (7, 0.2699999998)], [(4, 0.199993295), (5, 0.3599999999), (6, 0.36), (7, 0.3599999999)], [], [], [], []] - Exponential Moving Average Bonds: [[(4, 0.0999999992), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.2999999993), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000015), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] - Dividends: [0.0999999994, 0.1999999997, 0.3, 0.4000000006, 0, 0, 0, 0] - Normalized Server Emission: [0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] - Normalized Validator Emission: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0, 0, 0, 0] - Validator Emission: [49999999, 99999999, 149999999, 200000000, 0, 0, 0, 0] - Normalized Combined Emission: [0.0499999996, 0.0999999999, 
0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - Combined Emission: [49999999, 99999999, 149999999, 200000000, 49998779, 100000610, 149996337, 200004272] - Pruning Scores: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] - */ - - // Expected bonds calculations - // For uid 0: - // Initial weights: [0.25, 0.5, 0.75, 1.0] - // Active stake: [1, 2, 3, 4] - // ΔB = W◦S = [0.25*1, 0.5*2, 0.75*3, 1.0*4] = [0.25, 1.0, 2.25, 4.0] - // Normalize ΔB: [0.25/7.5, 1.0/7.5, 2.25/7.5, 4.0/7.5] = [0.0333, 0.1333, 0.3, 0.5333] - // Final bonds for netuid: [16383, 32767, 49151, 65535] - - assert_eq!(bonds[0][4], 16383); // Note: Calculated as explained above - assert_eq!(bonds[1][4], 32767); // Note: Calculated as explained above - assert_eq!(bonds[2][4], 49151); // Note: Calculated as explained above - assert_eq!(bonds[3][4], 65535); // Note: Calculated as explained above - - // === Set self-weight only on val1 - let uid = 0; - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - vec![uid], - vec![u16::MAX], - 0 - )); - next_block_no_epoch(netuid); - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - - let bonds = SubtensorModule::get_bonds(netuid); - assert_eq!(bonds[0][4], 2862); - assert_eq!(bonds[1][4], 32767); - assert_eq!(bonds[2][4], 49151); - assert_eq!(bonds[3][4], 65535); - - // === Set self-weight only on val2 - let uid = 1; - assert_ok!(SubtensorModule::set_weights( - RuntimeOrigin::signed(U256::from(uid)), - netuid, - vec![uid], - vec![u16::MAX], - 0 - )); - next_block_no_epoch(netuid); - if sparse { - SubtensorModule::epoch(netuid, 1_000_000_000); - } else { - SubtensorModule::epoch_dense(netuid, 1_000_000_000); - } - let bonds = SubtensorModule::get_bonds(netuid); - - /* n: 8 - current_block: 4; activity_cutoff: 5000; - Last update: [2, 3, 2, 2, 1, 1, 1, 1] - 
Inactive: [false, false, false, false, false, false, false, false] - Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] - hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] - Stake: [1, 2, 3, 4, 0, 0, 0, 0] - Normalised Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - validator_permits: [true, true, true, true, true, true, true, true] - max_allowed_validators: 64 - new_validator_permits: [true, true, true, true, true, true, true, true] - Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] - Weights: [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit): [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (permit+diag+outdate): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] - Weights (mask+norm): [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Ranks (before): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] - Consensus: [0, 0, 0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] - Weights: [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] - Validator Trust: [0, 0, 0.9999999995, 0.9999999995, 0, 0, 0, 0] - Ranks (after): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] - T: [0, 0, 0, 0, 1, 1, 1, 1] - Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0.2999926754, 0.4000085455] 
- B: [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] - B (outdatedmask): [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] - B (mask+norm): [[(4, 0.0499958121), (5, 0.00999718), (6, 0.00999718), (7, 0.00999718)], [(4, 0.211109894), (5, 0.2199983886), (6, 0.2199983886), (7, 0.2199983886)], [(4, 0.3166680625), (5, 0.3300009398), (6, 0.3300009398), (7, 0.3300009398)], [(4, 0.4222262308), (5, 0.4400034912), (6, 0.4400034912), (7, 0.4400034912)], [], [], [], []] - ΔB: [[], [], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] - ΔB (norm): [[], [], [(4, 0.428571427), (5, 0.4285714284), (6, 0.4285714284), (7, 0.4285714284)], [(4, 0.5714285728), (5, 0.5714285714), (6, 0.5714285714), (7, 0.5714285714)], [], [], [], []] - Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178685), (5, 0.4187143792), (6, 0.4187143792), (7, 0.4187143792)], [(4, 0.4968249004), (5, 0.5582860631), (6, 0.5582860631), (7, 0.5582860631)], [], [], [], []] - Exponential Moving Average Bonds: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178687), (5, 0.4187143794), (6, 0.4187143794), (7, 0.4187143794)], [(4, 0.4968249009), (5, 0.5582860636), (6, 0.5582860636), (7, 0.5582860636)], [], [], [], []] - Dividends: [0.0033995616, 0.030355499, 0.4141048414, 0.5521400978, 0, 0, 0, 0] - Normalized Server 
Emission: [0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] - Normalized Validator Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0, 0, 0, 0] - Validator Emission: [1699780, 15177749, 207052420, 276070048, 0, 0, 0, 0] - Normalized Combined Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - Combined Emission: [1699780, 15177749, 207052420, 276070048, 49998779, 100000610, 149996337, 200004272] - Pruning Scores: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] - */ - - assert_eq!(bonds[0][4], 435); - assert_eq!(bonds[1][4], 4985); - assert_eq!(bonds[2][4], 49151); - assert_eq!(bonds[3][4], 65535); - }); -} - -// #[test] fn test_set_alpha_disabled() { new_test_ext(1).execute_with(|| { @@ -1995,7 +1778,7 @@ fn test_zero_weights() { S: [1, 0]; S (mask): [1, 0]; S (mask+norm): [1, 0]; Block at registration: [0, 0] W: [[], []]; W (diagmask): [[], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []] R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0] - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0] E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2031,7 +1814,7 @@ fn test_zero_weights() { W: [[], [(1, 1)]] W (diagmask): [[], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []] R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0] - B: [[], []]: B (outdatedmask): [[], []]; B (mask+norm): [[], []] + B: [[], []]: B (mask+norm): [[], []] ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0] E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2086,7 +1869,7 @@ fn test_zero_weights() { S: [1, 
0]; S (mask): [1, 0]; S (mask+norm): [1, 0]; Block at registration: [0, 2]; W: [[(1, 1)], []]; W (diagmask): [[(1, 1)], []]; W (diag+outdatemask): [[], []]; W (mask+norm): [[], []]; R: [0, 0]; W (threshold): [[], []]; T: [0, 0]; C: [0.006693358, 0.006693358]; I: [0, 0]; - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[], []]; ΔB (norm): [[], []]; emaB: [[], []]; D: [0, 0]; E: [1000000000, 0]; P: [1, 0] */ for validator in 0..(n / 2) { @@ -2120,7 +1903,7 @@ fn test_zero_weights() { S: [1, 0]; S (mask): [1, 0]; S (mask+norm): [1, 0]; Block at registration: [0, 2]; W: [[(1, 1)], []]; W (diagmask): [[(1, 1)], []]; W (diag+outdatemask): [[(1, 1)], []]; W (mask+norm): [[(1, 1)], []]; R: [0, 1]; W (threshold): [[(1, 1)], []]; T: [0, 1]; C: [0.006693358, 0.9933076561]; I: [0, 1]; - B: [[], []]; B (outdatedmask): [[], []]; B (mask+norm): [[], []]; + B: [[], []]; B (mask+norm): [[], []]; ΔB: [[(1, 1)], []]; ΔB (norm): [[(1, 1)], []]; emaB: [[(1, 1)], []]; D: [1, 0]; emaB (max-upscale): [[(1, 1)], []] E: [500000000, 500000000]; P: [0.5, 0.5] */ for validator in 0..n { @@ -2438,333 +2221,6 @@ fn test_validator_permits() { } } -#[test] -fn test_compute_alpha_values() { - // Define the consensus values. - let consensus = vec![ - I32F32::from_num(0.1), - I32F32::from_num(0.5), - I32F32::from_num(0.9), - ]; - // Define the logistic function parameters 'a' and 'b'. - let a = I32F32::from_num(1.0); - let b = I32F32::from_num(0.0); - - // Compute the alpha values using the function. - let alpha = SubtensorModule::compute_alpha_values(&consensus, a, b); - - // Ensure the length of the alpha vector matches the consensus vector. - assert_eq!(alpha.len(), consensus.len()); - - // Manually compute the expected alpha values for each consensus value. - // The logistic function is: 1 / (1 + exp(b - a * c)) - // where c is the consensus value. 
- - // For consensus[0] = 0.1: - // exp_val = exp(0.0 - 1.0 * 0.1) = exp(-0.1) - // alpha[0] = 1 / (1 + exp(-0.1)) ~ 0.9048374180359595 - let exp_val_0 = I32F32::from_num(0.9048374180359595); - let expected_alpha_0 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_0); - - // For consensus[1] = 0.5: - // exp_val = exp(0.0 - 1.0 * 0.5) = exp(-0.5) - // alpha[1] = 1 / (1 + exp(-0.5)) ~ 0.6065306597126334 - let exp_val_1 = I32F32::from_num(0.6065306597126334); - let expected_alpha_1 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_1); - - // For consensus[2] = 0.9: - // exp_val = exp(0.0 - 1.0 * 0.9) = exp(-0.9) - // alpha[2] = 1 / (1 + exp(-0.9)) ~ 0.4065696597405991 - let exp_val_2 = I32F32::from_num(0.4065696597405991); - let expected_alpha_2 = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val_2); - - // Define an epsilon for approximate equality checks. - let epsilon = I32F32::from_num(1e-6); - - // Assert that the computed alpha values match the expected values within the epsilon. - assert_approx_eq(alpha[0], expected_alpha_0, epsilon); - assert_approx_eq(alpha[1], expected_alpha_1, epsilon); - assert_approx_eq(alpha[2], expected_alpha_2, epsilon); -} - -#[test] -fn test_compute_alpha_values_256_miners() { - // Define the consensus values for 256 miners. - let consensus: Vec = (0..256) - .map(|i| I32F32::from_num(i as f32 / 255.0)) - .collect(); - // Define the logistic function parameters 'a' and 'b'. - let a = I32F32::from_num(1.0); - let b = I32F32::from_num(0.0); - - // Compute the alpha values using the function. - let alpha = SubtensorModule::compute_alpha_values(&consensus, a, b); - - // Ensure the length of the alpha vector matches the consensus vector. - assert_eq!(alpha.len(), consensus.len()); - - // Define an epsilon for approximate equality checks. 
- let epsilon = I32F32::from_num(1e-6); - - for (i, &c) in consensus.iter().enumerate() { - // Use saturating subtraction and multiplication - let exponent = b - (a * c); - - // Use safe_exp instead of exp - let exp_val = safe_exp(exponent); - - // Use saturating addition and division - let expected_alpha = I32F32::from_num(1.0) / (I32F32::from_num(1.0) + exp_val); - - // Assert that the computed alpha values match the expected values within the epsilon. - assert_approx_eq(alpha[i], expected_alpha, epsilon); - } -} - -#[test] -fn test_clamp_alpha_values() { - // Define the alpha values. - let alpha = vec![ - I32F32::from_num(0.1), - I32F32::from_num(0.5), - I32F32::from_num(0.9), - ]; - // Define the high and low clamping values. - let alpha_high = I32F32::from_num(0.8); - let alpha_low = I32F32::from_num(0.2); - - // Compute the clamped alpha values using the function. - let clamped_alpha = SubtensorModule::clamp_alpha_values(alpha.clone(), alpha_high, alpha_low); - - // Ensure the length of the clamped alpha vector matches the original alpha vector. - assert_eq!(clamped_alpha.len(), alpha.len()); - - // Manually compute the expected clamped alpha values for each alpha value. - // The clamping logic is: max(alpha_low, min(alpha_high, a)) - - // For alpha[0] = 0.1: - // clamped_a = max(0.2, min(0.8, 0.1)) = max(0.2, 0.1) = 0.2 - let expected_clamped_alpha_0 = I32F32::from_num(0.2); - - // For alpha[1] = 0.5: - // clamped_a = max(0.2, min(0.8, 0.5)) = max(0.2, 0.5) = 0.5 - let expected_clamped_alpha_1 = I32F32::from_num(0.5); - - // For alpha[2] = 0.9: - // clamped_a = max(0.2, min(0.8, 0.9)) = max(0.2, 0.8) = 0.8 - let expected_clamped_alpha_2 = I32F32::from_num(0.8); - - // Assert that the computed clamped alpha values match the expected values. 
- assert_eq!(clamped_alpha[0], expected_clamped_alpha_0); - assert_eq!(clamped_alpha[1], expected_clamped_alpha_1); - assert_eq!(clamped_alpha[2], expected_clamped_alpha_2); -} - -#[test] -fn test_calculate_logistic_params() { - // Define test inputs - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Expected values - // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) - // = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.2 - 0.8) - // = (ln(0.1111) - ln(9)) / -0.6 - // = (-2.1972 - 2.1972) / -0.6 - // = -4.3944 / -0.6 - // = 7.324 - let expected_a = I32F32::from_num(7.324); - - // b = ln((1 / alpha_low - 1)) + a * consensus_low - // = ln((1 / 0.1 - 1)) + 7.324 * 0.2 - // = ln(9) + 1.4648 - // = 2.1972 + 1.4648 - // = 3.662 - let expected_b = I32F32::from_num(3.662); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert!( - (a - expected_a).abs() < I32F32::from_num(0.001), - "Expected a: {:?}, got: {:?}", - expected_a, - a - ); - assert!( - (b - expected_b).abs() < I32F32::from_num(0.001), - "Expected b: {:?}, got: {:?}", - expected_b, - b - ); -} - -#[test] -fn test_calculate_logistic_params_edge_cases() { - // Edge Case 1: Alpha values at their boundaries (0 and 1) - let alpha_high = I32F32::from_num(1.0); - let alpha_low = I32F32::from_num(0.0); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); - - // 
Edge Case 2: Consensus values at their boundaries (0 and 1) - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(1.0); - let consensus_low = I32F32::from_num(0.0); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Expected values - // a = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.0 - 1.0) - // = (ln(0.1111) - ln(9)) / -1.0 - // = (-2.1972 - 2.1972) / -1.0 - // = -4.3944 / -1.0 - // = 4.3944 - let expected_a = I32F32::from_num(4.3944); - - // b = ln((1 / 0.1 - 1)) + a * 0.0 - // = ln(9) + 0 - // = 2.1972 - let expected_b = I32F32::from_num(2.1972); - - // Assert the results - assert!( - (a - expected_a).abs() < I32F32::from_num(0.001), - "Expected a: {:?}, got: {:?}", - expected_a, - a - ); - assert!( - (b - expected_b).abs() < I32F32::from_num(0.001), - "Expected b: {:?}, got: {:?}", - expected_b, - b - ); - - // Edge Case 3: Alpha values being equal - let alpha_high = I32F32::from_num(0.5); - let alpha_low = I32F32::from_num(0.5); - let consensus_high = I32F32::from_num(0.8); - let consensus_low = I32F32::from_num(0.2); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); - - // Edge Case 4: Consensus values being equal - let alpha_high = I32F32::from_num(0.9); - let alpha_low = I32F32::from_num(0.1); - let consensus_high = I32F32::from_num(0.5); - let consensus_low = I32F32::from_num(0.5); - - // Call the function - let (a, b) = SubtensorModule::calculate_logistic_params( - alpha_high, - alpha_low, - consensus_high, - consensus_low, - ); - - // Assert the results - assert_eq!(a, I32F32::from_num(0.0), "Expected a 
to be 0, got: {:?}", a); - assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); -} - -#[test] -fn test_compute_ema_bonds_with_liquid_alpha_sparse() { - // Define test inputs - let bonds_delta = vec![ - vec![(0, I32F32::from_num(0.1)), (1, I32F32::from_num(0.2))], - vec![(0, I32F32::from_num(0.3)), (1, I32F32::from_num(0.4))], - ]; - let bonds = vec![ - vec![(0, I32F32::from_num(0.5)), (1, I32F32::from_num(0.6))], - vec![(0, I32F32::from_num(0.7)), (1, I32F32::from_num(0.8))], - ]; - let alpha = vec![I32F32::from_num(0.9), I32F32::from_num(0.8)]; - - // Expected values - // EMA calculation for each bond: - // EMA = alpha * bond_delta + (1 - alpha) * bond - // For bond (0, 0): - // EMA = 0.9 * 0.1 + (1 - 0.9) * 0.5 = 0.09 + 0.05 = 0.14 - // For bond (0, 1): - // EMA = 0.8 * 0.2 + (1 - 0.8) * 0.6 = 0.16 + 0.12 = 0.28 - // For bond (1, 0): - // EMA = 0.9 * 0.3 + (1 - 0.9) * 0.7 = 0.27 + 0.07 = 0.34 - // For bond (1, 1): - // EMA = 0.8 * 0.4 + (1 - 0.8) * 0.8 = 0.32 + 0.16 = 0.48 - let expected_ema_bonds = vec![ - vec![(0, I32F32::from_num(0.14)), (1, I32F32::from_num(0.28))], - vec![(0, I32F32::from_num(0.34)), (1, I32F32::from_num(0.48))], - ]; - - // Call the function - let ema_bonds = - SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); - - // Assert the results with an epsilon for approximate equality - let epsilon = I32F32::from_num(1e-6); - assert_approx_eq_vec_of_vec(&ema_bonds, &expected_ema_bonds, epsilon); -} - -#[test] -fn test_compute_ema_bonds_with_liquid_alpha_sparse_empty() { - // Test with empty inputs - let bonds_delta: Vec> = vec![]; - let bonds: Vec> = vec![]; - let alpha: Vec = vec![]; - - // Expected values: Empty Vec - let expected_ema_bonds: Vec> = vec![]; - - // Call the function - let ema_bonds = - SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); - - // Assert the results - assert_eq!( - ema_bonds, expected_ema_bonds, - "Expected EMA bonds: 
{:?}, got: {:?}", - expected_ema_bonds, ema_bonds - ); -} - #[test] fn test_get_set_alpha() { new_test_ext(1).execute_with(|| { @@ -3171,29 +2627,854 @@ pub fn assert_approx_eq(left: I32F32, right: I32F32, epsilon: I32F32) { } } -/// Helper function to assert approximate equality of two vectors of vectors of tuples. -fn assert_approx_eq_vec_of_vec( - left: &[Vec<(u16, I32F32)>], - right: &[Vec<(u16, I32F32)>], - epsilon: I32F32, -) { - assert_eq!(left.len(), right.len(), "Vectors have different lengths"); - for (left_row, right_row) in left.iter().zip(right.iter()) { - assert_eq!( - left_row.len(), - right_row.len(), - "Rows have different lengths" +// test Yuma 3 scenarios over a sequence of epochs. +fn setup_yuma_3_scenario(netuid: u16, n: u16, sparse: bool, max_stake: u64, stakes: Vec) { + let block_number = System::block_number(); + let tempo: u16 = 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + add_network(netuid, tempo, 0); + + SubtensorModule::set_max_allowed_uids(netuid, n); + assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_min_allowed_weights(netuid, 1); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::set_bonds_penalty(netuid, 0); + SubtensorModule::set_alpha_sigmoid_steepness(netuid, 10); + SubtensorModule::set_bonds_moving_average(netuid, 975_000); + + // === Register + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), ); - for ((left_idx, left_val), (right_idx, right_val)) in left_row.iter().zip(right_row.iter()) - { - assert_eq!(left_idx, right_idx, "Indices are 
different"); - assert!( - (left_val - right_val).abs() < epsilon, - "Values are different: left = {:?}, right = {:?}, epsilon = {:?}", - left_val, - right_val, - epsilon - ); + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stakes[key as usize], + ); + } + assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + + // Enable Liquid Alpha + SubtensorModule::set_kappa(netuid, u16::MAX / 2); + SubtensorModule::set_liquid_alpha_enabled(netuid, true); + SubtensorModule::set_alpha_values_32(netuid, I32F32::from_num(0.1), I32F32::from_num(0.3)); + + // Enable Yuma3 + SubtensorModule::set_yuma3_enabled(netuid, true); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, 3); + + // run first epoch to set allowed validators + // run to next block to ensure weights are set on nodes after their registration block + run_epoch(netuid, sparse); +} + +fn run_epoch(netuid: u16, sparse: bool) { + next_block_no_epoch(netuid); + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } +} + +fn run_epoch_and_check_bonds_dividends( + netuid: u16, + sparse: bool, + target_bonds: &[Vec], + target_dividends: &[f32], +) { + run_epoch(netuid, sparse); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let dividends = SubtensorModule::get_dividends(netuid); + + let epsilon = I32F32::from_num(1e-3); + // Check the bonds + for (bond, target_bond) in bonds.iter().zip(target_bonds.iter()) { + // skip the 3 validators + for (b, t) in bond.iter().zip(target_bond.iter().skip(3)) { + assert_approx_eq(*b, fixed(*t), epsilon); } } + // Check the dividends + for (dividend, 
target_dividend) in dividends.iter().zip(target_dividends.iter()) { + assert_approx_eq( + u16_proportion_to_fixed(*dividend), + fixed(*target_dividend), + epsilon, + ); + } +} + +fn set_yuma_3_weights(netuid: u16, weights: Vec>, indices: Vec) { + for (uid, weight) in weights.iter().enumerate() { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid as u64)), + netuid, + indices.clone(), + weight.to_vec(), + 0 + )); + } +} + +#[test] +fn test_yuma_3_kappa_moves_first() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves first + // Validator B: Small eager validator (0.1) - moves second + // Validator C: Small lazy validator (0.1) - moves last + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.0908, 0.1013], + vec![0.3697, 0.0000], + vec![0.3697, 0.0000], + ], + vec![ + vec![0.0815, 0.1924], + vec![0.3170, 0.1013], + vec![0.5580, 0.0000], + ], + vec![ + vec![0.0731, 0.2742], + vec![0.2765, 0.1924], + vec![0.4306, 0.1013], + ], + vec![ + vec![0.0656, 0.3478], + vec![0.2435, 0.2742], + vec![0.3589, 0.1924], + ], + vec![ + vec![0.0588, 0.4139], + vec![0.2157, 0.3478], + vec![0.3089, 0.2742], + ], + ]; + + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![1.0000, 0.0000, 0.0000, 0.0000, 0.0000], + vec![0.9382, 0.0618, 0.0000, 0.0000, 0.0000], + vec![0.8819, 0.0773, 0.0407, 0.0000, 0.0000], + vec![0.8564, 0.0844, 0.0592, 0.0000, 0.0000], + vec![0.8418, 0.0884, 0.0697, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, 
consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 2 + // Validator B -> Server 1 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![u16::MAX, 0], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 2 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_kappa_moves_second() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves second + // Validator B: Small eager validator (0.1) - moves first + // Validator C: Small lazy validator (0.1) - moves last + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.1715, 0.1013], + vec![0.0815, 0.3697], + vec![0.4336, 0.0000], + ], + vec![ + vec![0.1531, 0.1924], + vec![0.0731, 0.4336], + vec![0.3608, 0.1013], + ], + vec![ + vec![0.1369, 0.2742], + vec![0.0656, 0.4910], + vec![0.3103, 0.1924], + ], + vec![ + vec![0.1225, 0.3478], + vec![0.0588, 0.5426], + vec![0.2712, 0.2742], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![0.8446, 0.0498, 0.1056, 0.0000, 0.0000], + vec![0.6868, 
0.3132, 0.0000, 0.0000, 0.0000], + vec![0.7421, 0.2090, 0.0489, 0.0000, 0.0000], + vec![0.7625, 0.1706, 0.0669, 0.0000, 0.0000], + vec![0.7730, 0.1508, 0.0762, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 2 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_kappa_moves_last() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) - moves last + // Validator B: Small eager validator (0.1) - moves first + // Validator C: Small lazy validator (0.1) - moves second + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.2742, 0.0000], + vec![0.0815, 0.5081], + vec![0.1715, 0.2987], + ], + vec![ + vec![0.2416, 0.1013], + vec![0.0731, 
0.5580], + vec![0.1531, 0.3697], + ], + vec![ + vec![0.2141, 0.1924], + vec![0.0656, 0.6028], + vec![0.1369, 0.4336], + ], + vec![ + vec![0.1903, 0.2742], + vec![0.0588, 0.6430], + vec![0.1225, 0.4910], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000], + vec![0.8446, 0.0498, 0.1056, 0.0000, 0.0000], + vec![0.8966, 0.0333, 0.0701, 0.0000, 0.0000], + vec![0.4663, 0.3210, 0.2127, 0.0000, 0.0000], + vec![0.5976, 0.2340, 0.1683, 0.0000, 0.0000], + vec![0.6592, 0.1932, 0.1475, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // Initially, consensus is achieved by all Validators + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + 2 => { + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![0, u16::MAX]], + vec![3, 4], + ); + } + 3 => { + // Subsequent epochs All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_one_epoch_switch() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // Equal stake validators + let stakes: Vec = vec![33, 33, 34, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + + let targets_bonds = [ + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + vec![ + 
vec![0.1924, 0.0000], + vec![0.1924, 0.0000], + vec![0.1924, 0.0000], + ], + vec![ + vec![0.2742, 0.0000], + vec![0.2742, 0.0000], + vec![0.1715, 0.2987], + ], + vec![ + vec![0.3478, 0.0000], + vec![0.3478, 0.0000], + vec![0.2554, 0.2618], + ], + vec![ + vec![0.4139, 0.0000], + vec![0.4139, 0.0000], + vec![0.3309, 0.2312], + ], + vec![ + vec![0.4733, 0.0000], + vec![0.4733, 0.0000], + vec![0.3987, 0.2051], + ], + ]; + let targets_dividends = [ + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3782, 0.3782, 0.2436, 0.0000, 0.0000], + vec![0.3628, 0.3628, 0.2745, 0.0000, 0.0000], + vec![0.3541, 0.3541, 0.2917, 0.0000, 0.0000], + vec![0.3487, 0.3487, 0.3026, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 2 => { + // Validator A -> Server 1 + // Validator B -> Server 1 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![u16::MAX, 0], vec![0, u16::MAX]], + vec![3, 4], + ); + } + _ => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_liquid_alpha_disabled() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let n: u16 = 5; // 3 validators, 2 servers + let max_stake: u64 = 8; + + // Equal stake validators + let stakes: Vec = vec![33, 33, 34, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + + // disable liquid alpha + SubtensorModule::set_liquid_alpha_enabled(netuid, false); + + let targets_bonds = [ + vec![ + vec![0.0000, 0.0250, 0.0000], + vec![0.0000, 0.0250, 0.0000], + vec![0.0000, 0.0250, 0.0000], + ], + vec![ + vec![0.0000, 0.0494, 0.0000], + vec![0.0000, 0.0494, 0.0000], + 
vec![0.0000, 0.0494, 0.0000], + ], + vec![ + vec![0.0000, 0.0731, 0.0000], + vec![0.0000, 0.0731, 0.0000], + vec![0.0000, 0.0481, 0.0250], + ], + vec![ + vec![0.0000, 0.0963, 0.0000], + vec![0.0000, 0.0963, 0.0000], + vec![0.0000, 0.0719, 0.0244], + ], + vec![ + vec![0.0000, 0.1189, 0.0000], + vec![0.0000, 0.1189, 0.0000], + vec![0.0000, 0.0951, 0.0238], + ], + vec![ + vec![0.0000, 0.1409, 0.0000], + vec![0.0000, 0.1409, 0.0000], + vec![0.0000, 0.1178, 0.0232], + ], + ]; + let targets_dividends = [ + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3300, 0.3300, 0.3400, 0.0000, 0.0000], + vec![0.3734, 0.3734, 0.2532, 0.0000, 0.0000], + vec![0.3611, 0.3611, 0.2779, 0.0000, 0.0000], + vec![0.3541, 0.3541, 0.2919, 0.0000, 0.0000], + vec![0.3495, 0.3495, 0.3009, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 2 => { + // Validator A -> Server 1 + // Validator B -> Server 1 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![u16::MAX, 0], vec![0, u16::MAX]], + vec![3, 4], + ); + } + _ => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + +#[test] +fn test_yuma_3_stable_miner() { + for sparse in [true, false].iter() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let n: u16 = 6; // 3 validators, 3 servers + let max_stake: u64 = 8; + + // Validator A: kappa / Big validator (0.8) + // Validator B: Small eager validator (0.1) + // Validator C: Small lazy validator (0.1) + let stakes: Vec = vec![8, 1, 1, 0, 0, 0]; + + setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes); + let targets_bonds = [ + vec![ + vec![0.0507, 0.0000, 0.0507], + vec![0.0507, 0.0000, 0.0507], + vec![0.0507, 0.0000, 0.0507], + ], + vec![ + 
vec![0.0962, 0.0000, 0.0962], + vec![0.0455, 0.1000, 0.0962], + vec![0.0962, 0.0000, 0.0962], + ], + vec![ + vec![0.0863, 0.0507, 0.1371], + vec![0.0408, 0.1405, 0.1371], + vec![0.1770, 0.0000, 0.1371], + ], + vec![ + vec![0.0774, 0.0962, 0.1739], + vec![0.0367, 0.1770, 0.1739], + vec![0.1579, 0.0507, 0.1739], + ], + vec![ + vec![0.0694, 0.1371, 0.2069], + vec![0.0329, 0.2097, 0.2069], + vec![0.1411, 0.0962, 0.2069], + ], + vec![ + vec![0.0623, 0.1739, 0.2366], + vec![0.0296, 0.2391, 0.2366], + vec![0.1263, 0.1371, 0.2366], + ], + ]; + let targets_dividends = [ + vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000, 0.0000], + vec![0.8226, 0.0745, 0.1028, 0.0000, 0.0000, 0.0000], + vec![0.7750, 0.1685, 0.0565, 0.0000, 0.0000, 0.0000], + vec![0.7864, 0.1372, 0.0764, 0.0000, 0.0000, 0.0000], + vec![0.7912, 0.1241, 0.0847, 0.0000, 0.0000, 0.0000], + vec![0.7937, 0.1173, 0.0890, 0.0000, 0.0000, 0.0000], + ]; + + for (epoch, (target_bonds, target_dividends)) in targets_bonds + .iter() + .zip(targets_dividends.iter()) + .enumerate() + { + match epoch { + 0 => { + // all validators 0.5 for first and third server + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX / 2, 0, u16::MAX / 2]; 3], + vec![3, 4, 5], + ); + } + 1 => { + // one of small validators moves 0.5 to seconds server + set_yuma_3_weights( + netuid, + vec![ + vec![u16::MAX / 2, 0, u16::MAX / 2], + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![u16::MAX / 2, 0, u16::MAX / 2], + ], + vec![3, 4, 5], + ); + } + 2 => { + // big validator follows + set_yuma_3_weights( + netuid, + vec![ + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![0, u16::MAX / 2, u16::MAX / 2], + vec![u16::MAX / 2, 0, u16::MAX / 2], + ], + vec![3, 4, 5], + ); + } + 3 => { + // Subsequent epochs all validators have moves + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX / 2, u16::MAX / 2]; 3], + vec![3, 4, 5], + ); + } + _ => {} + }; + run_epoch_and_check_bonds_dividends( + netuid, + *sparse, + target_bonds, + target_dividends, + ); + } + }) + } +} + 
+#[test] +fn test_yuma_3_bonds_reset() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 5; // 3 validators, 2 servers + let netuid: u16 = 1; + let max_stake: u64 = 8; + + // "Case 8 - big vali moves late, then late" + // Big dishonest lazy vali. (0.8) + // Small eager-eager vali. (0.1) + // Small eager-eager vali 2. (0.1) + let stakes: Vec = vec![8, 1, 1, 0, 0]; + + setup_yuma_3_scenario(netuid, n, sparse, max_stake, stakes); + SubtensorModule::set_bonds_reset(netuid, true); + + // target bonds and dividends for specific epoch + let targets_dividends: std::collections::HashMap<_, _> = [ + (0, vec![0.8000, 0.1000, 0.1000, 0.0000, 0.0000]), + (1, vec![0.8944, 0.0528, 0.0528, 0.0000, 0.0000]), + (2, vec![0.5230, 0.2385, 0.2385, 0.0000, 0.0000]), + (19, vec![0.7919, 0.1040, 0.1040, 0.0000, 0.0000]), + (20, vec![0.7928, 0.1036, 0.1036, 0.0000, 0.0000]), + (21, vec![0.8467, 0.0766, 0.0766, 0.0000, 0.0000]), + (40, vec![0.7928, 0.1036, 0.1036, 0.0000, 0.0000]), + ] + .into_iter() + .collect(); + let targets_bonds: std::collections::HashMap<_, _> = [ + ( + 0, + vec![ + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + vec![0.1013, 0.0000], + ], + ), + ( + 1, + vec![ + vec![0.1924, 0.0000], + vec![0.0908, 0.2987], + vec![0.0908, 0.2987], + ], + ), + ( + 2, + vec![ + vec![0.1715, 0.1013], + vec![0.0815, 0.3697], + vec![0.0815, 0.3697], + ], + ), + ( + 19, + vec![ + vec![0.0269, 0.8539], + vec![0.0131, 0.8975], + vec![0.0131, 0.8975], + ], + ), + ( + 20, + vec![ + vec![0.0000, 0.8687], + vec![0.0000, 0.9079], + vec![0.0000, 0.9079], + ], + ), + ( + 21, + vec![ + vec![0.0000, 0.8820], + vec![0.2987, 0.6386], + vec![0.2987, 0.6386], + ], + ), + ( + 40, + vec![ + vec![0.8687, 0.0578], + vec![0.9079, 0.0523], + vec![0.9079, 0.0523], + ], + ), + ] + .into_iter() + .collect(); + + for epoch in 0..=40 { + match epoch { + 0 => { + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + 1 => { + // 
validators B, C switch + // Validator A -> Server 1 + // Validator B -> Server 2 + // Validator C -> Server 2 + set_yuma_3_weights( + netuid, + vec![vec![u16::MAX, 0], vec![0, u16::MAX], vec![0, u16::MAX]], + vec![3, 4], + ); + } + (2..=20) => { + // validator A copies weights + // All validators -> Server 2 + set_yuma_3_weights(netuid, vec![vec![0, u16::MAX]; 3], vec![3, 4]); + if epoch == 20 { + let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) + .expect("Hotkey not found"); + let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + } + } + 21 => { + // validators B, C switch back + // Validator A -> Server 2 + // Validator B -> Server 1 + // Validator C -> Server 1 + set_yuma_3_weights( + netuid, + vec![vec![0, u16::MAX], vec![u16::MAX, 0], vec![u16::MAX, 0]], + vec![3, 4], + ); + } + _ => { + // validator A copies weights + // All validators -> Server 1 + set_yuma_3_weights(netuid, vec![vec![u16::MAX, 0]; 3], vec![3, 4]); + } + }; + + if let Some((target_dividend, target_bond)) = + targets_dividends.get(&epoch).zip(targets_bonds.get(&epoch)) + { + run_epoch_and_check_bonds_dividends(netuid, sparse, target_bond, target_dividend); + } else { + run_epoch(netuid, sparse); + } + } + }) } diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs index bdd55c1961..fd0ea51061 100644 --- a/pallets/subtensor/src/tests/evm.rs +++ b/pallets/subtensor/src/tests/evm.rs @@ -21,6 +21,14 @@ fn public_to_evm_key(pubkey: &ecdsa::Public) -> H160 { H160::from(address) } +fn sign_evm_message>(pair: &ecdsa::Pair, message: M) -> ecdsa::Signature { + let hash = SubtensorModule::hash_message_eip191(message); + let mut sig = pair.sign_prehashed(&hash); + // Adjust the v value to either 27 or 28 + sig.0[64] += 27; + sig +} + #[test] fn test_associate_evm_key_success() { new_test_ext(1).execute_with(|| { @@ -47,8 +55,7 @@ fn test_associate_evm_key_success() { let mut message = [0u8; 64]; message[..32].copy_from_slice(hotkey_bytes.as_ref()); 
message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( RuntimeOrigin::signed(coldkey), @@ -94,11 +101,8 @@ fn test_associate_evm_key_different_block_number_success() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( RuntimeOrigin::signed(coldkey), @@ -141,11 +145,8 @@ fn test_associate_evm_key_coldkey_does_not_own_hotkey() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_err!( SubtensorModule::associate_evm_key( @@ -182,11 +183,8 @@ fn test_associate_evm_key_hotkey_not_registered_in_subnet() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); - let hashed_message = keccak_256(message.as_ref()); - let signature = 
pair.sign_prehashed(&hashed_message); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); assert_err!( SubtensorModule::associate_evm_key( @@ -225,9 +223,7 @@ fn test_associate_evm_key_using_wrong_hash_function() { let hashed_block_number = keccak_256(block_number.encode().as_ref()); let hotkey_bytes = hotkey.encode(); - let mut message = [0u8; 64]; - message[..32].copy_from_slice(hotkey_bytes.as_ref()); - message[32..].copy_from_slice(hashed_block_number.as_ref()); + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); let hashed_message = blake2_256(message.as_ref()); let signature = pair.sign_prehashed(&hashed_message); diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index c70da2c9d2..01e02742b7 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1221,42 +1221,59 @@ fn test_math_vec_mask_sparse_matrix() { } #[test] -fn test_math_scalar_vec_mask_sparse_matrix() { - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 1; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a == b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); +fn test_math_vec_mul() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let target: Vec = vec_to_fixed(&[1., 4., 9., 16.]); + let result = vec_mul(&vector, &vector); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = vec_mul(&vector_empty, &vector); + let target: Vec = vec![]; + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let vector_zero: Vec = vec_to_fixed(&[0., 0., 0., 0., 0., 
0., 0., 0.]); + let result = vec_mul(&vector_zero, &vector); + let target: Vec = vec![I32F32::from_num(0); 4]; + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 5; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a <= b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); +#[test] +fn test_math_mat_vec_mul() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let target: Vec = vec![1., 4., 9., 4., 10., 18., 7., 16., 27., 10., 22., 36.]; + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_vec_mul(&matrix, &vector); + assert_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_one: Vec = vec_to_fixed(&[1., 0., 0.]); + let target: Vec = vec![1., 0., 0., 4., 0., 0., 7., 0., 0., 10., 0., 0.]; + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_vec_mul(&matrix, &vector_one); + assert_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = mat_vec_mul(&matrix, &vector_empty); + let target: Vec> = vec![vec![]; 4]; + assert_mat_compare(&result, &target, I32F32::from_num(0)); +} - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let scalar: u64 = 5; - let masking_vector: Vec = vec![1, 4, 7]; - let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a >= b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, 
false), - I32F32::from_num(0), - ); +#[test] +fn test_math_mat_vec_mul_sparse() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let target: Vec = vec![1., 4., 9., 4., 10., 18., 7., 16., 27., 10., 22., 36.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_vec_mul_sparse(&matrix, &vector); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_one: Vec = vec_to_fixed(&[1., 0., 0.]); + let target: Vec = vec![1., 0., 0., 4., 0., 0., 7., 0., 0., 10., 0., 0.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_vec_mul_sparse(&matrix, &vector_one); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let vector_empty: Vec = vec_to_fixed(&[]); + let result = mat_vec_mul_sparse(&matrix, &vector_empty); + let target = vec![vec![]; 4]; + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } #[test] @@ -2133,89 +2150,116 @@ fn test_math_hadamard_sparse() { } #[test] -fn test_math_mat_ema() { - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., +fn test_math_mat_ema_alpha() { + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; + let new: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + 0.19, 0.38, 1., 0.436, 0.545, 0.6539, 0.763, 0.8719, 0.981, 1., 1., 1., ]; + let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0.1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 
8., 9., 10., 11., 12.]; + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; let new: Vec = vec![ 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., ]; - let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let target: Vec = vec![ + 0.10, 0.2, 1., 0.0399, 0.05, 0.0599, 0.07, 0.07999, 0.09, 0.1, 0.10999, 0.11999, + ]; let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let alphas = vec_to_mat_fixed(&[0.; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.001, 0.002, 0.003, 0.004, 0.05, 0.006, 0.007, 0.008, 0.009, 0.010, 0.011, 0.012, + ]; let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; let target: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + 0.10, 0.2, 1., 0.0399, 0.05, 0.0599, 0.07, 0.07999, 0.09, 0.1, 0.10999, 0.11999, ]; + let old = vec_to_mat_fixed(&old, 4, false); let new = vec_to_mat_fixed(&new, 4, false); let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[1.; 12], 4, false); + let result = mat_ema_alpha(&new, &old, &alphas); + assert_mat_compare(&result, &target, I32F32::from_num(1e-4)); } #[test] -fn test_math_sparse_mat_ema() { 
- let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., +fn test_math_sparse_mat_ema_alpha() { + let old: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, ]; + let new: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + 0.19, 0.38, 1., 0.43599, 0.545, 0.65399, 0.763, 0.87199, 0.981, 1., 1., 1., ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 3.8, 2.7, 7.6, 0., 11.4, 6.3, 15.2, 9., 19., 20.9, 22.8]; + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); + let old: Vec = vec![ + 0.001, 0.002, 0.003, 0.004, 0.05, 0.006, 0.007, 0.008, 0.009, 0.010, 0.011, 0.012, + ]; + let new: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; + let target: Vec = vec![ + 0.0109, 0.0218, 0.30270, 0.007599, 0.05, 0.01139, 0.0133, 0.01519, 0.017, 0.01899, 0.02089, + 0.0227, + ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = 
mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 2., 0., 4., 0., 6., 0., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 0.1, 0.2, 3., 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, + ]; + let target: Vec = vec![ + 0.01, 0.02, 0.3, 0.00399, 0.005, 0.00599, 0.007, 0.00799, 0.009, 0.01, 0.011, 0.01199, + ]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let new: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-4)); let old: Vec = vec![1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; let new: Vec = vec![0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0.9, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0.0, 0., 0., 0., 0.2, 
0., 0., 0., 0., 0., 0., 0.]; let old = vec_to_sparse_mat_fixed(&old, 4, false); let new = vec_to_sparse_mat_fixed(&new, 4, false); let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let alphas = vec_to_mat_fixed(&[0.1; 12], 4, false); + let result = mat_ema_alpha_sparse(&new, &old, &alphas); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(1e-1)); } #[test] @@ -2505,25 +2549,25 @@ fn test_checked_sum() { } #[test] -fn test_mat_ema_alpha_vec_sparse_empty() { +fn test_mat_ema_alpha_sparse_empty() { let new: Vec> = Vec::new(); let old: Vec> = Vec::new(); - let alpha: Vec = Vec::new(); - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha: Vec> = Vec::new(); + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!(result, Vec::>::new()); } #[test] -fn test_mat_ema_alpha_vec_sparse_single_element() { +fn test_mat_ema_alpha_sparse_single_element() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(0.5)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); - assert_eq!(result, vec![vec![(0, I32F32::from_num(1.5))]]); + let alpha = vec![vec![I32F32::from_num(0.5)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_multiple_elements() { +fn test_mat_ema_alpha_sparse_multiple_elements() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], @@ -2532,35 +2576,35 @@ fn test_mat_ema_alpha_vec_sparse_multiple_elements() { vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], ]; - let alpha: Vec = 
vec![I32F32::from_num(0.1), I32F32::from_num(0.2)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.1), I32F32::from_num(0.2)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); let expected = vec![ - vec![(0, I32F32::from_num(4.6)), (1, I32F32::from_num(5.2))], - vec![(0, I32F32::from_num(6.6)), (1, I32F32::from_num(7.2))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], ]; assert_sparse_mat_compare(&result, &expected, I32F32::from_num(0.000001)); } #[test] -fn test_mat_ema_alpha_vec_sparse_zero_alpha() { +fn test_mat_ema_alpha_sparse_zero_alpha() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(0.0)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); - assert_eq!(result, vec![vec![(0, I32F32::from_num(2.0))]]); + let alpha = vec![vec![I32F32::from_num(0.1), I32F32::from_num(0.0)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_one_alpha() { +fn test_mat_ema_alpha_sparse_one_alpha() { let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; - let alpha: Vec = vec![I32F32::from_num(1.0)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(1.0), I32F32::from_num(0.0)]]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); } #[test] -fn test_mat_ema_alpha_vec_sparse_mixed_alpha() { +fn test_mat_ema_alpha_sparse_mixed_alpha() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], @@ -2569,20 +2613,20 @@ fn 
test_mat_ema_alpha_vec_sparse_mixed_alpha() { vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], ]; - let alpha: Vec = vec![I32F32::from_num(0.3), I32F32::from_num(0.7)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.3), I32F32::from_num(0.7)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_sparse_mat_compare( &result, &[ - vec![(0, I32F32::from_num(3.8)), (1, I32F32::from_num(3.2))], - vec![(0, I32F32::from_num(5.8)), (1, I32F32::from_num(5.2))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(1.0))], ], I32F32::from_num(0.000001), ); } #[test] -fn test_mat_ema_alpha_vec_sparse_sparse_matrix() { +fn test_mat_ema_alpha_sparse_sparse_matrix() { let new: Vec> = vec![ vec![(0, I32F32::from_num(1.0))], vec![(1, I32F32::from_num(4.0))], @@ -2591,77 +2635,114 @@ fn test_mat_ema_alpha_vec_sparse_sparse_matrix() { vec![(0, I32F32::from_num(5.0))], vec![(1, I32F32::from_num(8.0))], ]; - let alpha: Vec = vec![I32F32::from_num(0.5), I32F32::from_num(0.5)]; - let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let alpha = vec![vec![I32F32::from_num(0.5), I32F32::from_num(0.5)]; 2]; + let result = mat_ema_alpha_sparse(&new, &old, &alpha); assert_eq!( result, vec![ - vec![(0, I32F32::from_num(3.0))], - vec![(1, I32F32::from_num(6.0))] + vec![(0, I32F32::from_num(1.0))], + vec![(1, I32F32::from_num(1.0))] ] ); } #[test] -fn test_mat_ema_alpha_vec_basic() { +fn test_mat_ema_alpha_basic() { let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let old = mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); let alpha = vec![ - I32F32::from_num(0.5), - I32F32::from_num(0.5), - I32F32::from_num(0.5), + vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + 2 ]; - let expected = mat_to_fixed(&[vec![0.75, 
1.75, 2.75], vec![3.75, 4.75, 5.75]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let expected = mat_to_fixed(&[vec![0.75, 1.0, 1.0], vec![1.0, 1.0, 1.0]]); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } #[test] -fn test_mat_ema_alpha_vec_varying_alpha() { +fn test_mat_ema_alpha_varying_alpha() { let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let old = mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); let alpha = vec![ - I32F32::from_num(0.2), - I32F32::from_num(0.5), - I32F32::from_num(0.8), + vec![ + I32F32::from_num(0.2), + I32F32::from_num(0.5), + I32F32::from_num(0.8), + ]; + 2 ]; - let expected = mat_to_fixed(&[vec![0.6, 1.75, 2.9], vec![3.6, 4.75, 5.9]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let expected = mat_to_fixed(&[vec![0.6, 1.0, 1.0], vec![1.0, 1.0, 1.0]]); + let result = mat_ema_alpha(&new, &old, &alpha); assert_mat_approx_eq(&result, &expected, I32F32::from_num(1e-6)); } #[test] -fn test_mat_ema_alpha_vec_empty_matrices() { +fn test_mat_ema_alpha_sparse_varying_alpha() { + let weights = vec![ + vec![(0, I32F32::from_num(0.1)), (1, I32F32::from_num(0.2))], + vec![(0, I32F32::from_num(0.3)), (1, I32F32::from_num(0.4))], + ]; + let bonds = vec![ + vec![(0, I32F32::from_num(0.5)), (1, I32F32::from_num(0.6))], + vec![(0, I32F32::from_num(0.7)), (1, I32F32::from_num(0.8))], + ]; + let alpha = vec![ + vec![I32F32::from_num(0.9), I32F32::from_num(0.8)], + vec![I32F32::from_num(0.5), I32F32::from_num(0.7)], + ]; + + let expected = vec![ + vec![(0, I32F32::from_num(0.14)), (1, I32F32::from_num(0.28))], + vec![ + (0, I32F32::from_num(0.499999)), + (1, I32F32::from_num(0.519999)), + ], + ]; + + let result = mat_ema_alpha_sparse(&weights, &bonds, &alpha); + // Assert the results with an epsilon for approximate equality + assert_sparse_mat_compare(&result, &expected, I32F32::from_num(1e-6)); +} + +#[test] +fn test_mat_ema_alpha_empty_matrices() { let new: Vec> = 
vec![]; let old: Vec> = vec![]; - let alpha: Vec = vec![]; + let alpha = vec![]; let expected: Vec> = vec![vec![]; 1]; - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } #[test] -fn test_mat_ema_alpha_vec_single_element() { +fn test_mat_ema_alpha_single_element() { let new = mat_to_fixed(&[vec![1.0]]); let old = mat_to_fixed(&[vec![0.5]]); - let alpha = vec![I32F32::from_num(0.5)]; + let alpha = vec![vec![I32F32::from_num(0.5)]]; let expected = mat_to_fixed(&[vec![0.75]]); - let result = mat_ema_alpha_vec(&new, &old, &alpha); + let result = mat_ema_alpha(&new, &old, &alpha); assert_eq!(result, expected); } // TODO: (@sd): Should these be non panicking? #[test] #[should_panic(expected = "assertion failed")] -fn test_mat_ema_alpha_vec_mismatched_dimensions() { +fn test_mat_ema_alpha_mismatched_dimensions() { let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]); let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); let alpha = vec![ - I32F32::from_num(0.5), - I32F32::from_num(0.5), - I32F32::from_num(0.5), + vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + 2 ]; - let _result = mat_ema_alpha_vec(&new, &old, &alpha); + let _result = mat_ema_alpha(&new, &old, &alpha); } #[test] diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 100bbed24e..1dfac06ad5 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -767,3 +767,56 @@ fn test_remove_storage_item Weight>( assert!(!weight.is_zero(), "Migration weight should be non-zero."); }); } + +#[test] +fn test_migrate_remove_commitments_rate_limit() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // Step 1: Simulate Old Storage Entry + // ------------------------------ + const MIGRATION_NAME: &str = "migrate_remove_commitments_rate_limit"; + + // Build the raw 
storage key: twox128("Commitments") ++ twox128("RateLimit") + let pallet_prefix = twox_128("Commitments".as_bytes()); + let storage_prefix = twox_128("RateLimit".as_bytes()); + + let mut key = Vec::new(); + key.extend_from_slice(&pallet_prefix); + key.extend_from_slice(&storage_prefix); + + let original_value: u64 = 123; + put_raw(&key, &original_value.encode()); + + let stored_before = get_raw(&key).expect("Expected RateLimit to exist"); + assert_eq!( + u64::decode(&mut &stored_before[..]).expect("Failed to decode RateLimit"), + original_value + ); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet" + ); + + // ------------------------------ + // Step 2: Run the Migration + // ------------------------------ + let weight = crate::migrations::migrate_remove_commitments_rate_limit:: + migrate_remove_commitments_rate_limit::(); + + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should be marked as completed" + ); + + // ------------------------------ + // Step 3: Verify Migration Effects + // ------------------------------ + assert!( + get_raw(&key).is_none(), + "RateLimit storage should have been cleared" + ); + + assert!(!weight.is_zero(), "Migration weight should be non-zero"); + }); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index a8ab96be8c..221d802ccd 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -132,6 +132,7 @@ parameter_types! { pub const TransactionByteFee: Balance = 100; pub const SDebug:u64 = 1; pub const InitialRho: u16 = 30; + pub const InitialAlphaSigmoidSteepness: u16 = 10; pub const InitialKappa: u16 = 32_767; pub const InitialTempo: u16 = 360; pub const SelfOwnership: u64 = 2; @@ -139,6 +140,7 @@ parameter_types! 
{ pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty:u16 = u16::MAX; + pub const InitialBondsResetOn: bool = false; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production @@ -180,8 +182,10 @@ parameter_types! { pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On // pub const InitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days + pub const InitialColdkeySwapRescheduleDuration: u64 = 24 * 60 * 60 / 12; // Default as 1 day pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days pub const InitialTaoWeight: u64 = 0; // 100% global weight. 
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -366,6 +370,7 @@ impl crate::Config for Test { type InitialAdjustmentAlpha = InitialAdjustmentAlpha; type InitialTargetRegistrationsPerInterval = InitialTargetRegistrationsPerInterval; type InitialRho = InitialRho; + type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; @@ -376,6 +381,7 @@ impl crate::Config for Test { type InitialPruningScore = InitialPruningScore; type InitialBondsMovingAverage = InitialBondsMovingAverage; type InitialBondsPenalty = InitialBondsPenalty; + type InitialBondsResetOn = InitialBondsResetOn; type InitialMaxAllowedValidators = InitialMaxAllowedValidators; type InitialDefaultDelegateTake = InitialDefaultDelegateTake; type InitialMinDelegateTake = InitialMinDelegateTake; @@ -404,8 +410,10 @@ impl crate::Config for Test { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; @@ -505,7 +513,6 @@ where impl pallet_drand::Config for Test { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index ce891e5615..161749a923 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ 
b/pallets/subtensor/src/tests/mod.rs @@ -1,6 +1,7 @@ mod batch_tx; mod children; mod coinbase; +mod consensus; mod delegate_info; mod difficulty; mod emission; diff --git a/pallets/subtensor/src/tests/move_stake.rs b/pallets/subtensor/src/tests/move_stake.rs index 0b7584a4f0..dd85ab9075 100644 --- a/pallets/subtensor/src/tests/move_stake.rs +++ b/pallets/subtensor/src/tests/move_stake.rs @@ -566,11 +566,11 @@ fn test_do_move_wrong_origin() { }); } -// 14. test_do_move_same_hotkey -// Description: Attempt to move stake to the same hotkey, which should fail or have no effect -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test move -- test_do_move_same_hotkey --exact --nocapture +// 14. test_do_move_same_hotkey_fails +// Description: Attempt to move stake to the same hotkey, which should fail +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test move -- test_do_move_same_hotkey_fails --exact --nocapture #[test] -fn test_do_move_same_hotkey() { +fn test_do_move_same_hotkey_fails() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); @@ -587,20 +587,22 @@ fn test_do_move_same_hotkey() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); // Attempt to move stake to the same hotkey - assert_ok!(SubtensorModule::do_move_stake( - RuntimeOrigin::signed(coldkey), - hotkey, - hotkey, - netuid, - netuid, - alpha, - )); + assert_eq!( + SubtensorModule::do_move_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + hotkey, + netuid, + netuid, + alpha, + ), + Err(Error::::SameNetuid.into()) + ); // Check that stake remains unchanged - assert_abs_diff_eq!( + assert_eq!( SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), - alpha - fee, - epsilon = alpha / 1000 + alpha, ); }); } @@ -1151,7 +1153,8 @@ fn test_do_swap_nonexistent_subnet() { new_test_ext(1).execute_with(|| { let coldkey = U256::from(1); let hotkey = U256::from(2); - let 
nonexistent_netuid: u16 = 9999; + let nonexistent_netuid1: u16 = 9998; + let nonexistent_netuid2: u16 = 9999; let stake_amount = 1_000_000; SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); @@ -1160,8 +1163,8 @@ fn test_do_swap_nonexistent_subnet() { SubtensorModule::do_swap_stake( RuntimeOrigin::signed(coldkey), hotkey, - nonexistent_netuid, - nonexistent_netuid, + nonexistent_netuid1, + nonexistent_netuid2, stake_amount ), Error::::SubnetNotExists @@ -1257,7 +1260,8 @@ fn test_do_swap_minimum_stake_check() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); - let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid1 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid2 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); let coldkey = U256::from(1); let hotkey = U256::from(3); @@ -1265,14 +1269,14 @@ fn test_do_swap_minimum_stake_check() { let swap_amount = 1; SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); - SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid, total_stake, 0); + SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid1, total_stake, 0); assert_err!( SubtensorModule::do_swap_stake( RuntimeOrigin::signed(coldkey), hotkey, - netuid, - netuid, + netuid1, + netuid2, swap_amount ), Error::::AmountTooLow @@ -1290,30 +1294,28 @@ fn test_do_swap_same_subnet() { let coldkey = U256::from(1); let hotkey = U256::from(2); let stake_amount = DefaultMinStake::::get() * 10; - let fee = DefaultStakingFee::::get(); SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); SubtensorModule::stake_into_subnet(&hotkey, &coldkey, netuid, stake_amount, 0); let alpha_before = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); - let fee_as_alpha = SubtensorModule::swap_tao_for_alpha(netuid, fee); - 
assert_ok!(SubtensorModule::do_swap_stake( - RuntimeOrigin::signed(coldkey), - hotkey, - netuid, - netuid, - alpha_before - )); + assert_eq!( + SubtensorModule::do_swap_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + netuid, + alpha_before + ), + Err(Error::::SameNetuid.into()) + ); let alpha_after = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); - assert_abs_diff_eq!( - alpha_after, - alpha_before - fee_as_alpha, - epsilon = alpha_after / 10000 - ); + + assert_eq!(alpha_after, alpha_before,); }); } diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 498cce15b2..3a8ce39ba3 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -37,7 +37,7 @@ fn test_registration_subscribe_ok_dispatch_info_ok() { assert_eq!( call.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(3_142_000_000, 0), + weight: frame_support::weights::Weight::from_parts(3_166_200_000, 0), class: DispatchClass::Normal, pays_fee: Pays::No } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index bc64a740fd..8172cab9da 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -223,7 +223,10 @@ fn test_verify_aggregated_stake_order() { new_test_ext(1).execute_with(|| { let hotkey_account_id = U256::from(533453); let coldkey_account_id = U256::from(55453); - let amount = 900_000_000_000; // over the maximum + let amount = 1_000_000_000_000u64; + let limit_price = 6_000_000_000u64; + let unstake_amount = 150_000_000_000u64; + let limit_price2 = 1_350_000_000; // add network let netuid1: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); @@ -233,8 +236,8 @@ fn test_verify_aggregated_stake_order() { let netuid5: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); let netuid6: u16 = 
add_dynamic_network(&hotkey_account_id, &coldkey_account_id); - let tao_reserve: U96F32 = U96F32::from_num(150_000_000_000_u64); - let alpha_in: U96F32 = U96F32::from_num(100_000_000_000_u64); + let tao_reserve: U96F32 = U96F32::from_num(1_500_000_000_000_u64); + let alpha_in: U96F32 = U96F32::from_num(1_000_000_000_000_u64); for netuid in [netuid1, netuid3, netuid3, netuid4, netuid5, netuid6] { SubnetTAO::::insert(netuid, tao_reserve.to_num::()); @@ -257,8 +260,6 @@ fn test_verify_aggregated_stake_order() { amount, ); - let limit_price = 6_000_000_000u64; - // Add stake with slippage safety and check if the result is ok assert_ok!(SubtensorModule::remove_stake_aggregate( RuntimeOrigin::signed(coldkey_account_id), @@ -271,8 +272,8 @@ fn test_verify_aggregated_stake_order() { RuntimeOrigin::signed(coldkey_account_id), hotkey_account_id, netuid4, - amount, - limit_price, + unstake_amount, + limit_price2, true )); @@ -401,8 +402,10 @@ fn test_verify_aggregated_stake_order() { #[allow(clippy::indexing_slicing)] fn test_verify_aggregated_stake_order_reversed() { new_test_ext(1).execute_with(|| { - let amount = 900_000_000_000; // over the maximum + let amount = 1_000_000_000_000u64; let limit_price = 6_000_000_000u64; + let unstake_amount = 150_000_000_000u64; + let limit_price2 = 1_350_000_000; // Coldkeys and hotkeys let coldkeys = vec![ @@ -422,8 +425,8 @@ fn test_verify_aggregated_stake_order_reversed() { .map(|(h, c)| add_dynamic_network(h, c)) .collect(); - let tao_reserve = U96F32::from_num(150_000_000_000u64); - let alpha_in = U96F32::from_num(100_000_000_000u64); + let tao_reserve = U96F32::from_num(1_500_000_000_000u64); + let alpha_in = U96F32::from_num(1_000_000_000_000u64); for netuid in &netuids { SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); @@ -452,8 +455,8 @@ fn test_verify_aggregated_stake_order_reversed() { RuntimeOrigin::signed(coldkeys[3]), hotkeys[3], netuids[3], - amount, - limit_price, + unstake_amount, + limit_price2, true )); @@ -589,8 
+592,10 @@ fn test_verify_aggregated_stake_order_reversed() { #[allow(clippy::indexing_slicing)] fn test_verify_all_job_type_sort_by_coldkey() { new_test_ext(1).execute_with(|| { - let amount = 1_000_000_000_000; + let amount = 1_000_000_000_000u64; let limit_price = 6_000_000_000u64; + let unstake_amount = 150_000_000_000u64; + let limit_price2 = 1_350_000_000; // Coldkeys and hotkeys let coldkeys = vec![ @@ -616,8 +621,8 @@ fn test_verify_all_job_type_sort_by_coldkey() { .map(|(h, c)| add_dynamic_network(h, c)) .collect(); - let tao_reserve = U96F32::from_num(150_000_000_000u64); - let alpha_in = U96F32::from_num(100_000_000_000u64); + let tao_reserve = U96F32::from_num(1_500_000_000_000u64); + let alpha_in = U96F32::from_num(1_000_000_000_000u64); for netuid in &netuids { SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); @@ -683,16 +688,16 @@ fn test_verify_all_job_type_sort_by_coldkey() { RuntimeOrigin::signed(coldkeys[6]), hotkeys[6], netuids[6], - amount, - limit_price, + unstake_amount, + limit_price2, true )); assert_ok!(SubtensorModule::remove_stake_limit_aggregate( RuntimeOrigin::signed(coldkeys[7]), hotkeys[7], netuids[7], - amount, - limit_price, + unstake_amount, + limit_price2, true )); @@ -769,8 +774,10 @@ fn test_verify_all_job_type_sort_by_coldkey() { #[allow(clippy::indexing_slicing)] fn test_verify_all_job_type_sort_by_coldkey_reverse_order() { new_test_ext(1).execute_with(|| { - let amount = 1_000_000_000_000; + let amount = 1_000_000_000_000u64; let limit_price = 6_000_000_000u64; + let unstake_amount = 150_000_000_000u64; + let limit_price2 = 1_350_000_000; // Coldkeys and hotkeys let coldkeys = vec![ @@ -796,8 +803,8 @@ fn test_verify_all_job_type_sort_by_coldkey_reverse_order() { .map(|(h, c)| add_dynamic_network(h, c)) .collect(); - let tao_reserve = U96F32::from_num(150_000_000_000u64); - let alpha_in = U96F32::from_num(100_000_000_000u64); + let tao_reserve = U96F32::from_num(1_500_000_000_000u64); + let alpha_in = 
U96F32::from_num(1_000_000_000_000u64); for netuid in &netuids { SubnetTAO::::insert(*netuid, tao_reserve.to_num::()); @@ -863,16 +870,16 @@ fn test_verify_all_job_type_sort_by_coldkey_reverse_order() { RuntimeOrigin::signed(coldkeys[6]), hotkeys[6], netuids[6], - amount, - limit_price, + unstake_amount, + limit_price2, true )); assert_ok!(SubtensorModule::remove_stake_limit_aggregate( RuntimeOrigin::signed(coldkeys[7]), hotkeys[7], netuids[7], - amount, - limit_price, + unstake_amount, + limit_price2, true )); @@ -3838,27 +3845,33 @@ fn test_unstake_low_liquidity_validate() { fn test_max_amount_add_root() { new_test_ext(0).execute_with(|| { // 0 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(0, 0), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(0, 0), + Err(Error::::ZeroMaxStakeAmount) + ); // 0.999999... price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(0, 999_999_999), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(0, 999_999_999), + Err(Error::::ZeroMaxStakeAmount) + ); // 1.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 1_000_000_001), - u64::MAX + Ok(u64::MAX) ); // 2.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(0, 2_000_000_000), - u64::MAX + Ok(u64::MAX) ); }); } @@ -3870,27 +3883,33 @@ fn test_max_amount_add_stable() { add_network(netuid, 1, 0); // 0 price => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(netuid, 0), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, 0), + Err(Error::::ZeroMaxStakeAmount) + ); // 0.999999... 
price => max is 0 - assert_eq!(SubtensorModule::get_max_amount_add(netuid, 999_999_999), 0); + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, 999_999_999), + Err(Error::::ZeroMaxStakeAmount) + ); // 1.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 1_000_000_001), - u64::MAX + Ok(u64::MAX) ); // 2.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_add(netuid, 2_000_000_000), - u64::MAX + Ok(u64::MAX) ); }); } @@ -3915,101 +3934,148 @@ fn test_max_amount_add_dynamic() { // tao_in, alpha_in, limit_price, expected_max_swappable [ // Zero handling (no panics) - (0, 1_000_000_000, 100, 0), - (1_000_000_000, 0, 100, 0), - (1_000_000_000, 1_000_000_000, 0, 0), + ( + 0, + 1_000_000_000, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 0, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 1_000_000_000, + 0, + Err(Error::::ZeroMaxStakeAmount), + ), // Low bounds - (1, 1, 0, 0), - (1, 1, 1, 0), - (1, 1, 2, 0), - (1, 1, 50_000_000_000, 49), + (1, 1, 0, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 1, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 2, Err(Error::::ZeroMaxStakeAmount)), + (1, 1, 50_000_000_000, Ok(49)), // Basic math - (1_000, 1_000, 2_000_000_000, 1_000), - (1_000, 1_000, 4_000_000_000, 3_000), - (1_000, 1_000, 16_000_000_000, 15_000), + (1_000, 1_000, 2_000_000_000, Ok(1_000)), + (1_000, 1_000, 4_000_000_000, Ok(3_000)), + (1_000, 1_000, 16_000_000_000, Ok(15_000)), ( 1_000_000_000_000, 1_000_000_000_000, 16_000_000_000, - 15_000_000_000_000, + Ok(15_000_000_000_000), ), // Normal range values with edge cases - (150_000_000_000, 100_000_000_000, 0, 0), - (150_000_000_000, 100_000_000_000, 100_000_000, 0), - (150_000_000_000, 100_000_000_000, 500_000_000, 0), - (150_000_000_000, 100_000_000_000, 1_499_999_999, 0), - 
(150_000_000_000, 100_000_000_000, 1_500_000_000, 0), - (150_000_000_000, 100_000_000_000, 1_500_000_001, 100), ( 150_000_000_000, 100_000_000_000, - 3_000_000_000, + 0, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 100_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 500_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( 150_000_000_000, + 100_000_000_000, + 1_499_999_999, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 150_000_000_000, + 100_000_000_000, + 1_500_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + (150_000_000_000, 100_000_000_000, 1_500_000_001, Ok(100)), + ( + 150_000_000_000, + 100_000_000_000, + 3_000_000_000, + Ok(150_000_000_000), ), // Miscellaneous overflows and underflows - (150_000_000_000, 100_000_000_000, u64::MAX, u64::MAX), - (150_000_000_000, 100_000_000_000, u64::MAX / 2, u64::MAX), - (1_000_000, 1_000_000_000_000_000_000_u64, 1, 999_000_000), - (1_000_000, 1_000_000_000_000_000_000_u64, 2, 1_999_000_000), + (150_000_000_000, 100_000_000_000, u64::MAX, Ok(u64::MAX)), + (150_000_000_000, 100_000_000_000, u64::MAX / 2, Ok(u64::MAX)), + (1_000_000, 1_000_000_000_000_000_000_u64, 1, Ok(999_000_000)), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 2, + Ok(1_999_000_000), + ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 10_000, - 9_999_999_000_000, + Ok(9_999_999_000_000), ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 100_000, - 99_999_999_000_000, + Ok(99_999_999_000_000), ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 1_000_000, - 999_999_999_000_000, + Ok(999_999_999_000_000), ), ( 1_000_000, 1_000_000_000_000_000_000_u64, 1_000_000_000, - 999_999_999_999_000_000, + Ok(999_999_999_999_000_000), ), ( 21_000_000_000_000_000, 10_000_000, 4_200_000_000_000_000_000, - 21_000_000_000_000_000, + Ok(21_000_000_000_000_000), ), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, - u64::MAX, + Ok(u64::MAX), ), ( 21_000_000_000_000_000, 
1_000_000_000_000_000_000_u64, 42_000_000, - 21_000_000_000_000_000, + Ok(21_000_000_000_000_000), ), ] .iter() - .for_each(|&(tao_in, alpha_in, limit_price, expected_max_swappable)| { - // Forse-set alpha in and tao reserve to achieve relative price of subnets - SubnetTAO::::insert(netuid, tao_in); - SubnetAlphaIn::::insert(netuid, alpha_in); - - if alpha_in != 0 { - let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); - assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); - } + .for_each( + |&(tao_in, alpha_in, limit_price, ref expected_max_swappable)| { + // Forse-set alpha in and tao reserve to achieve relative price of subnets + SubnetTAO::::insert(netuid, tao_in); + SubnetAlphaIn::::insert(netuid, alpha_in); - assert_eq!( - SubtensorModule::get_max_amount_add(netuid, limit_price), - expected_max_swappable, - ); - }); + if alpha_in != 0 { + let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); + assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); + } + + assert_eq!( + SubtensorModule::get_max_amount_add(netuid, limit_price), + *expected_max_swappable, + ); + }, + ); }); } @@ -4017,31 +4083,37 @@ fn test_max_amount_add_dynamic() { fn test_max_amount_remove_root() { new_test_ext(0).execute_with(|| { // 0 price on root => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_remove(0, 0), u64::MAX); + assert_eq!(SubtensorModule::get_max_amount_remove(0, 0), Ok(u64::MAX)); // 0.5 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... 
price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on root => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_remove(0, 1_000_000_001), 0); + assert_eq!( + SubtensorModule::get_max_amount_remove(0, 1_000_000_001), + Err(Error::::ZeroMaxStakeAmount) + ); // 2.0 price on root => max is 0 - assert_eq!(SubtensorModule::get_max_amount_remove(0, 2_000_000_000), 0); + assert_eq!( + SubtensorModule::get_max_amount_remove(0, 2_000_000_000), + Err(Error::::ZeroMaxStakeAmount) + ); }); } @@ -4052,30 +4124,33 @@ fn test_max_amount_remove_stable() { add_network(netuid, 1, 0); // 0 price => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_remove(netuid, 0), u64::MAX); + assert_eq!( + SubtensorModule::get_max_amount_remove(netuid, 0), + Ok(u64::MAX) + ); // 0.999999... 
price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 1_000_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_remove(netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -4100,85 +4175,142 @@ fn test_max_amount_remove_dynamic() { // tao_in, alpha_in, limit_price, expected_max_swappable [ // Zero handling (no panics) - (0, 1_000_000_000, 100, 0), - (1_000_000_000, 0, 100, 0), - (1_000_000_000, 1_000_000_000, 0, u64::MAX), + ( + 0, + 1_000_000_000, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000_000, + 0, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + (1_000_000_000, 1_000_000_000, 0, Ok(u64::MAX)), // Low bounds - (1, 1, 0, u64::MAX), - (1, 1, 1, 999_999_999), - (1, 1, 2, 499_999_999), - (1, 1, 250_000_000, 3), + (1, 1, 0, Ok(u64::MAX)), + (1, 1, 1, Ok(999_999_999)), + (1, 1, 2, Ok(499_999_999)), + (1, 1, 250_000_000, Ok(3)), // Basic math - (1_000, 1_000, 250_000_000, 3_000), - (1_000, 1_000, 62_500_000, 15_000), + (1_000, 1_000, 250_000_000, Ok(3_000)), + (1_000, 1_000, 62_500_000, Ok(15_000)), ( 1_000_000_000_000, 1_000_000_000_000, 62_500_000, - 15_000_000_000_000, + Ok(15_000_000_000_000), ), // Normal range values with edge cases - (200_000_000_000, 100_000_000_000, 0, u64::MAX), + (200_000_000_000, 100_000_000_000, 0, Ok(u64::MAX)), ( 200_000_000_000, 100_000_000_000, 1_000_000_000, - 100_000_000_000, + Ok(100_000_000_000), ), ( 200_000_000_000, 100_000_000_000, 500_000_000, - 300_000_000_000, + Ok(300_000_000_000), ), - (200_000_000_000, 100_000_000_000, 2_000_000_000, 0), - (200_000_000_000, 100_000_000_000, 2_000_000_001, 0), - (200_000_000_000, 
100_000_000_000, 1_999_999_999, 50), - (200_000_000_000, 100_000_000_000, 1_999_999_990, 500), + ( + 200_000_000_000, + 100_000_000_000, + 2_000_000_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 200_000_000_000, + 100_000_000_000, + 2_000_000_001, + Err(Error::::ZeroMaxStakeAmount), + ), + (200_000_000_000, 100_000_000_000, 1_999_999_999, Ok(50)), + (200_000_000_000, 100_000_000_000, 1_999_999_990, Ok(500)), // Miscellaneous overflows and underflows - (2_000_000_000_000, 100_000_000_000, u64::MAX, 0), - (200_000_000_000, 100_000_000_000, u64::MAX / 2, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 1, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 10, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 100, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, 1_000, 0), - (1_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, 0), + ( + 2_000_000_000_000, + 100_000_000_000, + u64::MAX, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 200_000_000_000, + 100_000_000_000, + u64::MAX / 2, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 1, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 10, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 100, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + 1_000, + Err(Error::::ZeroMaxStakeAmount), + ), + ( + 1_000_000, + 1_000_000_000_000_000_000_u64, + u64::MAX, + Err(Error::::ZeroMaxStakeAmount), + ), ( 21_000_000_000_000_000, 1_000_000, 21_000_000_000_000_000, - 999_000_000, + Ok(999_000_000), ), - (21_000_000_000_000_000, 1_000_000, u64::MAX, 138_412), + (21_000_000_000_000_000, 1_000_000, u64::MAX, Ok(138_412)), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, u64::MAX, - 0, + Err(Error::::ZeroMaxStakeAmount), ), ( 21_000_000_000_000_000, 1_000_000_000_000_000_000_u64, 20_000_000, - 50_000_000_000_000_000, + Ok(50_000_000_000_000_000), ), ] .iter() 
- .for_each(|&(tao_in, alpha_in, limit_price, expected_max_swappable)| { - // Forse-set alpha in and tao reserve to achieve relative price of subnets - SubnetTAO::::insert(netuid, tao_in); - SubnetAlphaIn::::insert(netuid, alpha_in); - - if alpha_in != 0 { - let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); - assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); - } + .for_each( + |&(tao_in, alpha_in, limit_price, ref expected_max_swappable)| { + // Forse-set alpha in and tao reserve to achieve relative price of subnets + SubnetTAO::::insert(netuid, tao_in); + SubnetAlphaIn::::insert(netuid, alpha_in); - assert_eq!( - SubtensorModule::get_max_amount_remove(netuid, limit_price), - expected_max_swappable, - ); - }); + if alpha_in != 0 { + let expected_price = I96F32::from_num(tao_in) / I96F32::from_num(alpha_in); + assert_eq!(SubtensorModule::get_alpha_price(netuid), expected_price); + } + + assert_eq!( + SubtensorModule::get_max_amount_remove(netuid, limit_price), + *expected_max_swappable, + ); + }, + ); }); } @@ -4187,31 +4319,37 @@ fn test_max_amount_remove_dynamic() { fn test_max_amount_move_root_root() { new_test_ext(0).execute_with(|| { // 0 price on (root, root) exchange => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 0), u64::MAX); + assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 0), Ok(u64::MAX)); // 0.5 price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... 
price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on (root, root) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, 0, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on (root, root) => max is 0 - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 1_000_000_001), 0); + assert_eq!( + SubtensorModule::get_max_amount_move(0, 0, 1_000_000_001), + Err(Error::::ZeroMaxStakeAmount) + ); // 2.0 price on (root, root) => max is 0 - assert_eq!(SubtensorModule::get_max_amount_move(0, 0, 2_000_000_000), 0); + assert_eq!( + SubtensorModule::get_max_amount_move(0, 0, 2_000_000_000), + Err(Error::::ZeroMaxStakeAmount) + ); }); } @@ -4223,36 +4361,39 @@ fn test_max_amount_move_root_stable() { add_network(netuid, 1, 0); // 0 price on (root, stable) exchange => max is u64::MAX - assert_eq!(SubtensorModule::get_max_amount_move(0, netuid, 0), u64::MAX); + assert_eq!( + SubtensorModule::get_max_amount_move(0, netuid, 0), + Ok(u64::MAX) + ); // 0.5 price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 500_000_000), - u64::MAX + Ok(u64::MAX) ); // 0.999999... 
price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 999_999_999), - u64::MAX + Ok(u64::MAX) ); // 1.0 price on (root, stable) => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 1_000_000_000), - u64::MAX + Ok(u64::MAX) ); // 1.000...001 price on (root, stable) => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 1_000_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2.0 price on (root, stable) => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(0, netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -4284,24 +4425,25 @@ fn test_max_amount_move_stable_dynamic() { // 0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 0), - u64::MAX + Ok(u64::MAX) ); // 2.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 2_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 3.0 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 3_000_000_000), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 2x price => max is 1x TAO assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_000_000_000), + SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_000_000_000) + .unwrap(), 50_000_000_000, epsilon = 10_000, ); @@ -4309,21 +4451,23 @@ fn test_max_amount_move_stable_dynamic() { // Precision test: // 1.99999..9000 price => max > 0 assert!( - SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_999_999_000) > 0 + SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, 1_999_999_000) + .unwrap() + > 0 ); // Max price doesn't panic and returns something meaningful assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX), - 0 + Err(Error::::ZeroMaxStakeAmount) ); assert_eq!( 
SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX - 1), - 0 + Err(Error::::ZeroMaxStakeAmount) ); assert_eq!( SubtensorModule::get_max_amount_move(stable_netuid, dynamic_netuid, u64::MAX / 2), - 0 + Err(Error::::ZeroMaxStakeAmount) ); }); } @@ -4355,30 +4499,38 @@ fn test_max_amount_move_dynamic_stable() { // 0 price => max is u64::MAX assert_eq!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 0), - u64::MAX + Ok(u64::MAX) ); // Low price values don't blow things up - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1) > 0); - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 2) > 0); - assert!(SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 3) > 0); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1).unwrap() > 0 + ); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 2).unwrap() > 0 + ); + assert!( + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 3).unwrap() > 0 + ); // 1.5000...1 price => max is 0 assert_eq!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_001), - 0 + Err(Error::::ZeroMaxStakeAmount) ); // 1.5 price => max is 0 because of non-zero slippage assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_000), + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_500_000_000) + .unwrap_or(0), 0, epsilon = 10_000 ); // 1/2 price => max is 1x Alpha assert_abs_diff_eq!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 750_000_000), + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 750_000_000) + .unwrap(), 100_000_000_000, epsilon = 10_000, ); @@ -4386,20 +4538,25 @@ fn test_max_amount_move_dynamic_stable() { // Precision test: // 1.499999.. 
price => max > 0 assert!( - SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_499_999_999) > 0 + SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, 1_499_999_999) + .unwrap() + > 0 ); // Max price doesn't panic and returns something meaningful assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX) + .unwrap_or(0) < 21_000_000_000_000_000 ); assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX - 1) + .unwrap_or(0) < 21_000_000_000_000_000 ); assert!( SubtensorModule::get_max_amount_move(dynamic_netuid, stable_netuid, u64::MAX / 2) + .unwrap_or(0) < 21_000_000_000_000_000 ); }); @@ -4623,7 +4780,8 @@ fn test_max_amount_move_dynamic_dynamic() { origin_netuid, destination_netuid, limit_price - ), + ) + .unwrap_or(0u64), expected_max_swappable, epsilon = precision ); @@ -4890,6 +5048,39 @@ fn test_add_stake_limit_fill_or_kill() { }); } +#[test] +fn test_add_stake_limit_partial_zero_max_stake_amount_error() { + new_test_ext(1).execute_with(|| { + let hotkey_account_id = U256::from(533453); + let coldkey_account_id = U256::from(55453); + + // Exact values from the error: + // https://taostats.io/extrinsic/5338471-0009?network=finney + let amount = 19980000000; + let limit_price = 26953618; + let tao_reserve: U96F32 = U96F32::from_num(5_032_494_439_940_u64); + let alpha_in: U96F32 = U96F32::from_num(186_268_425_402_874_u64); + + let netuid: u16 = add_dynamic_network(&hotkey_account_id, &coldkey_account_id); + SubnetTAO::::insert(netuid, tao_reserve.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); + + assert_noop!( + SubtensorModule::add_stake_limit( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount, + limit_price, + true + ), + Error::::ZeroMaxStakeAmount + ); + }); +} + #[test] fn test_remove_stake_limit_ok() { 
new_test_ext(1).execute_with(|| { @@ -5920,3 +6111,50 @@ fn test_unstake_all_aggregate_fails() { })); }); } + +#[test] +fn test_increase_stake_for_hotkey_and_coldkey_on_subnet_adds_to_staking_hotkeys_map() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let coldkey1 = U256::from(2); + let hotkey = U256::from(3); + + let netuid = 1; + let stake_amount = 100_000_000_000; + + // Check no entry in the staking hotkeys map + assert!(!StakingHotkeys::::contains_key(coldkey)); + // insert manually + StakingHotkeys::::insert(coldkey, Vec::::new()); + // check entry has no hotkey + assert!(!StakingHotkeys::::get(coldkey).contains(&hotkey)); + + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // Check entry exists in the staking hotkeys map + assert!(StakingHotkeys::::contains_key(coldkey)); + // check entry has hotkey + assert!(StakingHotkeys::::get(coldkey).contains(&hotkey)); + + // Check no entry in the staking hotkeys map for coldkey1 + assert!(!StakingHotkeys::::contains_key(coldkey1)); + + // Run increase stake for hotkey and coldkey1 on subnet + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey1, + netuid, + stake_amount, + ); + + // Check entry exists in the staking hotkeys map for coldkey1 + assert!(StakingHotkeys::::contains_key(coldkey1)); + // check entry has hotkey + assert!(StakingHotkeys::::get(coldkey1).contains(&hotkey)); + }); +} diff --git a/pallets/subtensor/src/tests/swap_coldkey.rs b/pallets/subtensor/src/tests/swap_coldkey.rs index beb4df59a5..385830904c 100644 --- a/pallets/subtensor/src/tests/swap_coldkey.rs +++ b/pallets/subtensor/src/tests/swap_coldkey.rs @@ -1883,6 +1883,71 @@ fn test_schedule_swap_coldkey_with_pending_swap() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_schedule_swap_coldkey_failure_and_reschedule --exact --nocapture +#[test] +fn 
test_schedule_swap_coldkey_failure_and_reschedule() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey1 = U256::from(2); + let new_coldkey2 = U256::from(3); + + let swap_cost = SubtensorModule::get_key_swap_cost(); + + // Two swaps + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, swap_cost + 1_000 * 2); + + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey1 + )); + + let current_block = >::block_number(); + let duration = ColdkeySwapScheduleDuration::::get(); + let when = current_block.saturating_add(duration); + + // Setup first key to fail + // -- will fail if the new coldkey is already a hotkey (has an Owner) + Owner::::insert(new_coldkey1, U256::from(4)); + + // First swap fails + run_to_block(when - 1); + next_block(); + + // Check the failure + next_block(); // Still in the scheduled-swap map + assert!(ColdkeySwapScheduled::::contains_key(old_coldkey)); + + // Try to schedule the second swap + assert_noop!( + SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey2 + ), + Error::::SwapAlreadyScheduled + ); + + // Wait for correct duration after first swap fails + let fail_duration = ColdkeySwapRescheduleDuration::::get(); + run_to_block(when + fail_duration); + + // Schedule the second swap + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey2 + )); + + let current_block = >::block_number(); + let duration = ColdkeySwapScheduleDuration::::get(); + let when = current_block.saturating_add(duration); + run_to_block(when - 1); + next_block(); + + // Check the success + next_block(); // Now in the scheduled-swap map + assert!(!ColdkeySwapScheduled::::contains_key(old_coldkey)); + }); +} + #[test] fn test_coldkey_swap_delegate_identity_updated() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/utils/evm.rs 
b/pallets/subtensor/src/utils/evm.rs index a34f6afc80..6877739f69 100644 --- a/pallets/subtensor/src/utils/evm.rs +++ b/pallets/subtensor/src/utils/evm.rs @@ -1,10 +1,26 @@ use super::*; +use alloc::string::ToString; use frame_support::ensure; use frame_system::ensure_signed; use sp_core::{H160, ecdsa::Signature, hashing::keccak_256}; +use sp_std::vec::Vec; + +const MESSAGE_PREFIX: &str = "\x19Ethereum Signed Message:\n"; impl Pallet { + pub(crate) fn hash_message_eip191>(message: M) -> [u8; 32] { + let msg_len = message.as_ref().len().to_string(); + keccak_256( + &[ + MESSAGE_PREFIX.as_bytes(), + msg_len.as_bytes(), + message.as_ref(), + ] + .concat(), + ) + } + /// Associate an EVM key with a hotkey. /// /// This function accepts a Signature, which is a signed message containing the hotkey concatenated with @@ -30,7 +46,7 @@ impl Pallet { hotkey: T::AccountId, evm_key: H160, block_number: u64, - signature: Signature, + mut signature: Signature, ) -> dispatch::DispatchResult { let coldkey = ensure_signed(origin)?; @@ -39,15 +55,18 @@ impl Pallet { Error::::NonAssociatedColdKey ); + // Normalize the v value to 0 or 1 + if signature.0[64] >= 27 { + signature.0[64] = signature.0[64].saturating_sub(27); + } + let uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; - let mut message = [0u8; 64]; let block_hash = keccak_256(block_number.encode().as_ref()); - message[..32].copy_from_slice(&hotkey.encode()[..]); - message[32..].copy_from_slice(block_hash.as_ref()); + let message = [hotkey.encode().as_ref(), block_hash.as_ref()].concat(); let public = signature - .recover_prehashed(&keccak_256(message.as_ref())) - .ok_or(Error::::UnableToRecoverPublicKey)?; + .recover_prehashed(&Self::hash_message_eip191(message)) + .ok_or(Error::::InvalidIdentity)?; let secp_pubkey = libsecp256k1::PublicKey::parse_compressed(&public.0) .map_err(|_| Error::::UnableToRecoverPublicKey)?; let uncompressed = secp_pubkey.serialize(); @@ -71,4 +90,19 @@ impl Pallet { Ok(()) } + + pub fn 
uid_lookup(netuid: u16, evm_key: H160, limit: u16) -> Vec<(u16, u64)> { + let mut ret_val = AssociatedEvmAddress::::iter_prefix(netuid) + .take(limit as usize) + .filter_map(|(uid, (stored_evm_key, block_associated))| { + if stored_evm_key != evm_key { + return None; + } + + Some((uid, block_associated)) + }) + .collect::>(); + ret_val.sort_by(|(_, block1), (_, block2)| block1.cmp(block2)); + ret_val + } } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index b375cc66e4..899fa83646 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -582,6 +582,14 @@ impl Pallet { Self::deposit_event(Event::BondsPenaltySet(netuid, bonds_penalty)); } + pub fn get_bonds_reset(netuid: u16) -> bool { + BondsResetOn::::get(netuid) + } + pub fn set_bonds_reset(netuid: u16, bonds_reset: bool) { + BondsResetOn::::insert(netuid, bonds_reset); + Self::deposit_event(Event::BondsResetOnSet(netuid, bonds_reset)); + } + pub fn get_max_registrations_per_block(netuid: u16) -> u16 { MaxRegistrationsPerBlock::::get(netuid) } @@ -671,6 +679,12 @@ impl Pallet { AlphaValues::::get(netuid) } + pub fn set_alpha_values_32(netuid: u16, low: I32F32, high: I32F32) { + let low = (low.saturating_mul(I32F32::saturating_from_num(u16::MAX))).to_num::(); + let high = (high.saturating_mul(I32F32::saturating_from_num(u16::MAX))).to_num::(); + AlphaValues::::insert(netuid, (low, high)); + } + pub fn get_alpha_values_32(netuid: u16) -> (I32F32, I32F32) { let (alpha_low, alpha_high): (u16, u16) = AlphaValues::::get(netuid); let converted_low = @@ -681,6 +695,14 @@ impl Pallet { (converted_low, converted_high) } + pub fn set_alpha_sigmoid_steepness(netuid: u16, steepness: u16) { + AlphaSigmoidSteepness::::insert(netuid, steepness); + } + pub fn get_alpha_sigmoid_steepness(netuid: u16) -> I32F32 { + let alpha = AlphaSigmoidSteepness::::get(netuid); + I32F32::saturating_from_num(alpha) + } + pub fn set_liquid_alpha_enabled(netuid: u16, 
enabled: bool) { LiquidAlphaOn::::set(netuid, enabled); } @@ -689,6 +711,14 @@ impl Pallet { LiquidAlphaOn::::get(netuid) } + pub fn set_yuma3_enabled(netuid: u16, enabled: bool) { + Yuma3On::::set(netuid, enabled); + } + + pub fn get_yuma3_enabled(netuid: u16) -> bool { + Yuma3On::::get(netuid) + } + /// Set the duration for coldkey swap /// /// # Arguments diff --git a/precompiles/src/extensions.rs b/precompiles/src/extensions.rs index 2d3d65a41c..1c90922c57 100644 --- a/precompiles/src/extensions.rs +++ b/precompiles/src/extensions.rs @@ -6,8 +6,8 @@ use frame_support::dispatch::{GetDispatchInfo, Pays, PostDispatchInfo}; use frame_system::RawOrigin; use pallet_admin_utils::{PrecompileEnable, PrecompileEnum}; use pallet_evm::{ - AddressMapping, BalanceConverter, ExitError, GasWeightMapping, Precompile, PrecompileFailure, - PrecompileHandle, PrecompileResult, + AddressMapping, BalanceConverter, EvmBalance, ExitError, GasWeightMapping, Precompile, + PrecompileFailure, PrecompileHandle, PrecompileResult, }; use precompile_utils::EvmResult; use sp_core::{H160, U256, blake2_256}; @@ -27,14 +27,14 @@ pub(crate) trait PrecompileHandleExt: PrecompileHandle { where R: pallet_evm::Config, { - let amount = self.context().apparent_value; - ::BalanceConverter::into_substrate_balance(amount).ok_or( - PrecompileFailure::Error { + let amount = EvmBalance::new(self.context().apparent_value); + let result = ::BalanceConverter::into_substrate_balance(amount) + .ok_or(PrecompileFailure::Error { exit_status: ExitError::Other( "error converting balance from ETH to subtensor".into(), ), - }, - ) + })?; + Ok(result.into()) } /// Dispatches a runtime call, but also checks and records the gas costs. 
diff --git a/precompiles/src/lib.rs b/precompiles/src/lib.rs index ed0c2222a2..ca52831323 100644 --- a/precompiles/src/lib.rs +++ b/precompiles/src/lib.rs @@ -26,6 +26,7 @@ use crate::metagraph::*; use crate::neuron::*; use crate::staking::*; use crate::subnet::*; +use crate::uid_lookup::*; mod balance_transfer; mod ed25519; @@ -34,6 +35,7 @@ mod metagraph; mod neuron; mod staking; mod subnet; +mod uid_lookup; pub struct Precompiles(PhantomData); @@ -158,6 +160,9 @@ where a if a == hash(NeuronPrecompile::::INDEX) => { NeuronPrecompile::::try_execute::(handle, PrecompileEnum::Neuron) } + a if a == hash(UidLookupPrecompile::::INDEX) => { + UidLookupPrecompile::::try_execute::(handle, PrecompileEnum::UidLookup) + } _ => None, } } diff --git a/precompiles/src/solidity/stakingV2.abi b/precompiles/src/solidity/stakingV2.abi index 16adb1d8a8..20cc9c90fe 100644 --- a/precompiles/src/solidity/stakingV2.abi +++ b/precompiles/src/solidity/stakingV2.abi @@ -251,5 +251,71 @@ "outputs": [], "stateMutability": "nonpayable", "type": "function" - } + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "addStakeLimit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "limit_price", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "allow_partial", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + 
"name": "removeStakeLimit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, ] diff --git a/precompiles/src/solidity/stakingV2.sol b/precompiles/src/solidity/stakingV2.sol index dd033cfca8..202615af62 100644 --- a/precompiles/src/solidity/stakingV2.sol +++ b/precompiles/src/solidity/stakingV2.sol @@ -51,12 +51,12 @@ interface IStaking { ) external; /** - * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey + * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey * `destination_hotkey`. * * This function allows external accounts and contracts to move staked TAO from one hotkey to another, - * which effectively calls `move_stake` on the subtensor pallet with specified origin and destination - * hotkeys as parameters being the hashed address mappings of H160 sender address to Substrate ss58 + * which effectively calls `move_stake` on the subtensor pallet with specified origin and destination + * hotkeys as parameters being the hashed address mappings of H160 sender address to Substrate ss58 * address as implemented in Frontier HashedAddressMapping: * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 * @@ -67,7 +67,7 @@ interface IStaking { * @param amount The amount to move in rao. * * Requirements: - * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring * that the stake is correctly attributed. */ function moveStake( @@ -79,12 +79,12 @@ interface IStaking { ) external; /** - * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey + * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey * `destination_coldkey`. 
* * This function allows external accounts and contracts to transfer staked TAO to another coldkey, - * which effectively calls `transfer_stake` on the subtensor pallet with specified destination - * coldkey as a parameter being the hashed address mapping of H160 sender address to Substrate ss58 + * which effectively calls `transfer_stake` on the subtensor pallet with specified destination + * coldkey as a parameter being the hashed address mapping of H160 sender address to Substrate ss58 * address as implemented in Frontier HashedAddressMapping: * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 * @@ -95,7 +95,7 @@ interface IStaking { * @param amount The amount to move in rao. * * Requirements: - * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring * that the stake is correctly attributed. */ function transferStake( @@ -194,4 +194,59 @@ interface IStaking { bytes32 hotkey, uint256 netuid ) external view returns (uint256); + + /** + * @dev Adds a subtensor stake `amount` associated with the `hotkey` within a price limit. + * + * This function allows external accounts and contracts to stake TAO into the subtensor pallet, + * which effectively calls `add_stake_limit` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount to stake in rao. + * @param limit_price The price limit to stake at in rao. Number of rao per alpha. + * @param allow_partial Whether to allow partial stake. 
+ * @param netuid The subnet to stake to (uint256). + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. + */ + function addStakeLimit( + bytes32 hotkey, + uint256 amount, + uint256 limit_price, + bool allow_partial, + uint256 netuid + ) external payable; + + /** + * @dev Removes a subtensor stake `amount` from the specified `hotkey` within a price limit. + * + * This function allows external accounts and contracts to unstake TAO from the subtensor pallet, + * which effectively calls `remove_stake_limit` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount to unstake in alpha. + * @param limit_price The price limit to unstake at in rao. Number of rao per alpha. + * @param allow_partial Whether to allow partial unstake. + * @param netuid The subnet to stake to (uint256). + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. 
+ * - The existing stake amount must be not lower than specified amount + */ + function removeStakeLimit( + bytes32 hotkey, + uint256 amount, + uint256 limit_price, + bool allow_partial, + uint256 netuid + ) external; } diff --git a/precompiles/src/solidity/subnet.abi b/precompiles/src/solidity/subnet.abi index e2a3e569da..a2849a0cbe 100644 --- a/precompiles/src/solidity/subnet.abi +++ b/precompiles/src/solidity/subnet.abi @@ -194,6 +194,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + } + ], + "name": "getYuma3Enabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -668,6 +687,24 @@ "stateMutability": "payable", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + }, + { + "internalType": "bool", + "name": "yuma3Enabled", + "type": "bool" + } + ], + "name": "setYuma3Enabled", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, { "inputs": [ { diff --git a/precompiles/src/solidity/subnet.sol b/precompiles/src/solidity/subnet.sol index d5ef0916d9..2fa9d3f550 100644 --- a/precompiles/src/solidity/subnet.sol +++ b/precompiles/src/solidity/subnet.sol @@ -152,6 +152,13 @@ interface ISubnet { bool liquidAlphaEnabled ) external payable; + function getYuma3Enabled(uint16 netuid) external view returns (bool); + + function setYuma3Enabled( + uint16 netuid, + bool yuma3Enabled + ) external payable; + function getAlphaValues( uint16 netuid ) external view returns (uint16, uint16); diff --git a/precompiles/src/solidity/uidLookup.abi b/precompiles/src/solidity/uidLookup.abi new file mode 100644 index 0000000000..558358dcaa --- /dev/null +++ b/precompiles/src/solidity/uidLookup.abi @@ -0,0 +1,43 @@ +[ + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + 
}, + { + "internalType": "address", + "name": "evm_address", + "type": "address" + }, + { + "internalType": "uint16", + "name": "limit", + "type": "uint16" + } + ], + "name":"uidLookup", + "outputs": [ + { + "components": [ + { + "internalType": "uint16", + "name": "uid", + "type": "uint16" + }, + { + "internalType": "uint64", + "name": "block_associated", + "type": "uint64" + } + ], + "internalType": "struct LookupItem[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/precompiles/src/solidity/uidLookup.sol b/precompiles/src/solidity/uidLookup.sol new file mode 100644 index 0000000000..4eae98899c --- /dev/null +++ b/precompiles/src/solidity/uidLookup.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.8.0; + +address constant IUID_LOOKUP_ADDRESS = 0x0000000000000000000000000000000000000806; + +struct LookupItem { + uint16 uid; + uint64 block_associated; +} + +interface IUidLookup { + function uidLookup( + uint16 netuid, + address evm_address, + uint16 limit + ) external view returns (LookupItem[] memory); +} diff --git a/precompiles/src/staking.rs b/precompiles/src/staking.rs index 8f797a7476..21f50bc917 100644 --- a/precompiles/src/staking.rs +++ b/precompiles/src/staking.rs @@ -30,7 +30,8 @@ use core::marker::PhantomData; use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; use frame_system::RawOrigin; use pallet_evm::{ - AddressMapping, BalanceConverter, ExitError, PrecompileFailure, PrecompileHandle, + AddressMapping, BalanceConverter, EvmBalance, ExitError, PrecompileFailure, PrecompileHandle, + SubstrateBalance, }; use precompile_utils::EvmResult; use sp_core::{H256, U256}; @@ -276,6 +277,56 @@ where handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) } + + #[precompile::public("addStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + fn add_stake_limit( + handle: &mut impl PrecompileHandle, + address: H256, + amount_rao: U256, + 
limit_price_rao: U256, + allow_partial: bool, + netuid: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let amount_staked = amount_rao.unique_saturated_into(); + let limit_price = limit_price_rao.unique_saturated_into(); + let hotkey = R::AccountId::from(address.0); + let netuid = try_u16_from_u256(netuid)?; + let call = pallet_subtensor::Call::::add_stake_limit { + hotkey, + netuid, + amount_staked, + limit_price, + allow_partial, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } + + #[precompile::public("removeStakeLimit(bytes32,uint256,uint256,bool,uint256)")] + fn remove_stake_limit( + handle: &mut impl PrecompileHandle, + address: H256, + amount_alpha: U256, + limit_price_rao: U256, + allow_partial: bool, + netuid: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let hotkey = R::AccountId::from(address.0); + let netuid = try_u16_from_u256(netuid)?; + let amount_unstaked = amount_alpha.unique_saturated_into(); + let limit_price = limit_price_rao.unique_saturated_into(); + let call = pallet_subtensor::Call::::remove_stake_limit { + hotkey, + netuid, + amount_unstaked, + limit_price, + allow_partial, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } } // Deprecated, exists for backward compatibility. 
@@ -351,10 +402,11 @@ where let account_id = handle.caller_account_id::(); let hotkey = R::AccountId::from(address.0); let netuid = try_u16_from_u256(netuid)?; + let amount = EvmBalance::new(amount); let amount_unstaked = ::BalanceConverter::into_substrate_balance(amount) + .map(|amount| amount.into_u64_saturating()) .ok_or(ExitError::OutOfFund)?; - let amount_unstaked = amount_unstaked.unique_saturated_into(); let call = pallet_subtensor::Call::::remove_stake { hotkey, netuid, @@ -375,8 +427,9 @@ where // get total stake of coldkey let total_stake = pallet_subtensor::Pallet::::get_total_stake_for_coldkey(&coldkey); // Convert to EVM decimals - let stake_u256 = U256::from(total_stake); + let stake_u256: SubstrateBalance = total_stake.into(); let stake_eth = ::BalanceConverter::into_evm_balance(stake_u256) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake_eth) @@ -393,8 +446,9 @@ where // get total stake of hotkey let total_stake = pallet_subtensor::Pallet::::get_total_stake_for_hotkey(&hotkey); // Convert to EVM decimals - let stake_u256 = U256::from(total_stake); + let stake_u256: SubstrateBalance = total_stake.into(); let stake_eth = ::BalanceConverter::into_evm_balance(stake_u256) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake_eth) @@ -414,8 +468,9 @@ where let stake = pallet_subtensor::Pallet::::get_stake_for_hotkey_and_coldkey_on_subnet( &hotkey, &coldkey, netuid, ); - let stake = U256::from(stake); + let stake: SubstrateBalance = stake.into(); let stake = ::BalanceConverter::into_evm_balance(stake) + .map(|amount| amount.into_u256()) .ok_or(ExitError::InvalidRange)?; Ok(stake) @@ -453,15 +508,20 @@ where account_id: &::AccountId, amount: U256, ) -> Result<(), PrecompileFailure> { + let amount = EvmBalance::new(amount); let amount_sub = ::BalanceConverter::into_substrate_balance(amount) .ok_or(ExitError::OutOfFund)?; // Create a transfer call from the smart contract to the caller + let value = 
amount_sub + .into_u64_saturating() + .try_into() + .map_err(|_| ExitError::Other("Failed to convert u64 to Balance".into()))?; let transfer_call = ::RuntimeCall::from( pallet_balances::Call::::transfer_allow_death { dest: account_id.clone().into(), - value: amount_sub.unique_saturated_into(), + value, }, ); diff --git a/precompiles/src/subnet.rs b/precompiles/src/subnet.rs index cf2b71bcd2..7d4dd175e3 100644 --- a/precompiles/src/subnet.rs +++ b/precompiles/src/subnet.rs @@ -327,6 +327,12 @@ where Ok(pallet_subtensor::Rho::::get(netuid)) } + #[precompile::public("getAlphaSigmoidSteepness(uint16)")] + #[precompile::view] + fn get_alpha_sigmoid_steepness(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { + Ok(pallet_subtensor::AlphaSigmoidSteepness::::get(netuid)) + } + #[precompile::public("setRho(uint16,uint16)")] #[precompile::payable] fn set_rho(handle: &mut impl PrecompileHandle, netuid: u16, rho: u16) -> EvmResult<()> { @@ -338,6 +344,22 @@ where ) } + #[precompile::public("setAlphaSigmoidSteepness(uint16,uint16)")] + #[precompile::payable] + fn set_alpha_sigmoid_steepness( + handle: &mut impl PrecompileHandle, + netuid: u16, + steepness: u16, + ) -> EvmResult<()> { + let call = + pallet_admin_utils::Call::::sudo_set_alpha_sigmoid_steepness { netuid, steepness }; + + handle.try_dispatch_runtime_call::( + call, + RawOrigin::Signed(handle.caller_account_id::()), + ) + } + #[precompile::public("getActivityCutoff(uint16)")] #[precompile::view] fn get_activity_cutoff(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { @@ -549,6 +571,27 @@ where ) } + #[precompile::public("getYuma3Enabled(uint16)")] + #[precompile::view] + fn get_yuma3_enabled(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult { + Ok(pallet_subtensor::Yuma3On::::get(netuid)) + } + + #[precompile::public("setYuma3Enabled(uint16,bool)")] + #[precompile::payable] + fn set_yuma3_enabled( + handle: &mut impl PrecompileHandle, + netuid: u16, + enabled: bool, + ) -> EvmResult<()> { + 
let call = pallet_admin_utils::Call::::sudo_set_yuma3_enabled { netuid, enabled }; + + handle.try_dispatch_runtime_call::( + call, + RawOrigin::Signed(handle.caller_account_id::()), + ) + } + #[precompile::public("getAlphaValues(uint16)")] #[precompile::view] fn get_alpha_values(_: &mut impl PrecompileHandle, netuid: u16) -> EvmResult<(u16, u16)> { diff --git a/precompiles/src/uid_lookup.rs b/precompiles/src/uid_lookup.rs new file mode 100644 index 0000000000..61fb9d6d7f --- /dev/null +++ b/precompiles/src/uid_lookup.rs @@ -0,0 +1,53 @@ +use core::marker::PhantomData; + +use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; +use pallet_evm::PrecompileHandle; +use precompile_utils::{EvmResult, prelude::Address}; +use sp_runtime::traits::{Dispatchable, StaticLookup}; +use sp_std::vec::Vec; + +use crate::PrecompileExt; + +pub(crate) struct UidLookupPrecompile(PhantomData); + +impl PrecompileExt for UidLookupPrecompile +where + R: frame_system::Config + pallet_subtensor::Config + pallet_evm::Config, + R::AccountId: From<[u8; 32]>, + ::RuntimeCall: + GetDispatchInfo + Dispatchable, + ::RuntimeCall: From> + + GetDispatchInfo + + Dispatchable, + <::Lookup as StaticLookup>::Source: From, +{ + const INDEX: u64 = 2054; +} + +#[precompile_utils::precompile] +impl UidLookupPrecompile +where + R: frame_system::Config + pallet_subtensor::Config + pallet_evm::Config, + R::AccountId: From<[u8; 32]>, + ::RuntimeCall: + GetDispatchInfo + Dispatchable, + ::RuntimeCall: From> + + GetDispatchInfo + + Dispatchable, + <::Lookup as StaticLookup>::Source: From, +{ + #[precompile::public("uidLookup(uint16,address,uint16)")] + #[precompile::view] + fn uid_lookup( + _handle: &mut impl PrecompileHandle, + netuid: u16, + evm_address: Address, + limit: u16, + ) -> EvmResult> { + Ok(pallet_subtensor::Pallet::::uid_lookup( + netuid, + evm_address.0, + limit, + )) + } +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index a7a89608c9..01618870f4 100644 --- a/runtime/src/lib.rs +++ 
b/runtime/src/lib.rs @@ -26,7 +26,7 @@ use frame_support::{ }, }; use frame_system::{EnsureNever, EnsureRoot, EnsureRootWithSuccess, RawOrigin}; -use pallet_commitments::CanCommit; +use pallet_commitments::{CanCommit, OnMetadataCommitment}; use pallet_grandpa::{ AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, fg_primitives, }; @@ -95,12 +95,13 @@ use scale_info::TypeInfo; // Frontier use fp_rpc::TransactionStatus; use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; -use pallet_evm::{Account as EVMAccount, BalanceConverter, FeeCalculator, Runner}; +use pallet_evm::{ + Account as EVMAccount, BalanceConverter, EvmBalance, FeeCalculator, Runner, SubstrateBalance, +}; // Drand impl pallet_drand::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; type AuthorityId = pallet_drand::crypto::TestAuthId; type Verifier = pallet_drand::verifier::QuicknetVerifier; type UnsignedPriority = ConstU64<{ 1 << 20 }>; @@ -208,7 +209,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 264, + spec_version: 267, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -953,10 +954,9 @@ impl pallet_registry::Config for Runtime { } parameter_types! 
{ - pub const MaxCommitFieldsInner: u32 = 1; + pub const MaxCommitFieldsInner: u32 = 2; pub const CommitmentInitialDeposit: Balance = 0; // Free pub const CommitmentFieldDeposit: Balance = 0; // Free - pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } #[subtensor_macros::freeze_struct("7c76bd954afbb54e")] @@ -982,17 +982,28 @@ impl CanCommit for AllowCommitments { } } +pub struct ResetBondsOnCommit; +impl OnMetadataCommitment for ResetBondsOnCommit { + #[cfg(not(feature = "runtime-benchmarks"))] + fn on_metadata_commitment(netuid: u16, address: &AccountId) { + let _ = SubtensorModule::do_reset_bonds(netuid, address); + } + + #[cfg(feature = "runtime-benchmarks")] + fn on_metadata_commitment(_: u16, _: &AccountId) {} +} + impl pallet_commitments::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type WeightInfo = pallet_commitments::weights::SubstrateWeight; type CanCommit = AllowCommitments; + type OnMetadataCommitment = ResetBondsOnCommit; type MaxFields = MaxCommitFields; type InitialDeposit = CommitmentInitialDeposit; type FieldDeposit = CommitmentFieldDeposit; - type DefaultRateLimit = CommitmentRateLimit; type TempoInterface = TempoInterface; } @@ -1024,6 +1035,7 @@ pub const INITIAL_CHILDKEY_TAKE_RATELIMIT: u64 = 5; // Configure the pallet subtensor. parameter_types! { pub const SubtensorInitialRho: u16 = 10; + pub const SubtensorInitialAlphaSigmoidSteepness: u16 = 1000; pub const SubtensorInitialKappa: u16 = 32_767; // 0.5 = 65535/2 pub const SubtensorInitialMaxAllowedUids: u16 = 4096; pub const SubtensorInitialIssuance: u64 = 0; @@ -1044,6 +1056,7 @@ parameter_types! { pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; pub const SubtensorInitialBondsPenalty: u16 = u16::MAX; + pub const SubtensorInitialBondsResetOn: bool = false; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. 
pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take @@ -1072,8 +1085,10 @@ parameter_types! { pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn + pub const InitialYuma3On: bool = false; // Default value for Yuma3On // pub const SubtensorInitialNetworkMaxStake: u64 = u64::MAX; // (DEPRECATED) pub const InitialColdkeySwapScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days + pub const InitialColdkeySwapRescheduleDuration: BlockNumber = 24 * 60 * 60 / 12; // 1 day pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks @@ -1094,10 +1109,12 @@ impl pallet_subtensor::Config for Runtime { type TriumvirateInterface = TriumvirateVotes; type Scheduler = Scheduler; type InitialRho = SubtensorInitialRho; + type InitialAlphaSigmoidSteepness = SubtensorInitialAlphaSigmoidSteepness; type InitialKappa = SubtensorInitialKappa; type InitialMaxAllowedUids = SubtensorInitialMaxAllowedUids; type InitialBondsMovingAverage = SubtensorInitialBondsMovingAverage; type InitialBondsPenalty = SubtensorInitialBondsPenalty; + type InitialBondsResetOn = SubtensorInitialBondsResetOn; type InitialIssuance = SubtensorInitialIssuance; type InitialMinAllowedWeights = SubtensorInitialMinAllowedWeights; type InitialEmissionValue = SubtensorInitialEmissionValue; @@ -1141,9 +1158,11 @@ impl pallet_subtensor::Config for Runtime { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; + type Yuma3On = InitialYuma3On; 
type InitialTaoWeight = SubtensorInitialTaoWeight; type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialColdkeySwapRescheduleDuration = InitialColdkeySwapRescheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; type DurationOfStartCall = DurationOfStartCall; @@ -1176,7 +1195,6 @@ impl pallet_admin_utils::Config for Runtime { type Aura = AuraPalletIntrf; type Grandpa = GrandpaInterfaceImpl; type Balance = Balance; - type WeightInfo = pallet_admin_utils::weights::SubstrateWeight; } /// Define the ChainId @@ -1239,11 +1257,12 @@ pub struct SubtensorEvmBalanceConverter; impl BalanceConverter for SubtensorEvmBalanceConverter { /// Convert from Substrate balance (u64) to EVM balance (U256) - fn into_evm_balance(value: U256) -> Option { + fn into_evm_balance(value: SubstrateBalance) -> Option { + let value = value.into_u256(); if let Some(evm_value) = value.checked_mul(U256::from(EVM_TO_SUBSTRATE_DECIMALS)) { // Ensure the result fits within the maximum U256 value if evm_value <= U256::MAX { - Some(evm_value) + Some(EvmBalance::new(evm_value)) } else { // Log value too large log::debug!( @@ -1263,11 +1282,12 @@ impl BalanceConverter for SubtensorEvmBalanceConverter { } /// Convert from EVM balance (U256) to Substrate balance (u64) - fn into_substrate_balance(value: U256) -> Option { + fn into_substrate_balance(value: EvmBalance) -> Option { + let value = value.into_u256(); if let Some(substrate_value) = value.checked_div(U256::from(EVM_TO_SUBSTRATE_DECIMALS)) { // Ensure the result fits within the TAO balance type (u64) if substrate_value <= U256::from(u64::MAX) { - Some(substrate_value) + Some(SubstrateBalance::new(substrate_value)) } else { // Log value too large log::debug!( @@ -1447,6 +1467,7 @@ parameter_types! 
{ 432000 // 60 days maximum (60 * 24 * 60 * 60 / 12) }; pub const RefundContributorsLimit: u32 = 50; + pub const MaxContributors: u32 = 500; } impl pallet_crowdloan::Config for Runtime { @@ -1461,6 +1482,7 @@ impl pallet_crowdloan::Config for Runtime { type MinimumBlockDuration = MinimumBlockDuration; type MaximumBlockDuration = MaximumBlockDuration; type RefundContributorsLimit = RefundContributorsLimit; + type MaxContributors = MaxContributors; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -2256,8 +2278,8 @@ fn check_whitelist() { #[test] fn test_into_substrate_balance_valid() { // Valid conversion within u64 range - let evm_balance = U256::from(1_000_000_000_000_000_000u128); // 1 TAO in EVM - let expected_substrate_balance = U256::from(1_000_000_000u128); // 1 TAO in Substrate + let evm_balance: EvmBalance = 1_000_000_000_000_000_000u128.into(); // 1 TAO in EVM + let expected_substrate_balance: SubstrateBalance = 1_000_000_000u128.into(); // 1 TAO in Substrate let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2266,8 +2288,8 @@ fn test_into_substrate_balance_valid() { #[test] fn test_into_substrate_balance_large_value() { // Maximum valid balance for u64 - let evm_balance = U256::from(u64::MAX) * U256::from(EVM_TO_SUBSTRATE_DECIMALS); // Max u64 TAO in EVM - let expected_substrate_balance = U256::from(u64::MAX); + let evm_balance = EvmBalance::new(U256::from(u64::MAX) * U256::from(EVM_TO_SUBSTRATE_DECIMALS)); // Max u64 TAO in EVM + let expected_substrate_balance = SubstrateBalance::new(U256::from(u64::MAX)); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2276,8 +2298,9 @@ fn test_into_substrate_balance_large_value() { #[test] fn test_into_substrate_balance_exceeds_u64() { // EVM balance that exceeds u64 after conversion - let evm_balance = - 
(U256::from(u64::MAX) + U256::from(1)) * U256::from(EVM_TO_SUBSTRATE_DECIMALS); + let evm_balance = EvmBalance::new( + (U256::from(u64::MAX) + U256::from(1)) * U256::from(EVM_TO_SUBSTRATE_DECIMALS), + ); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, None); // Exceeds u64, should return None @@ -2286,8 +2309,8 @@ fn test_into_substrate_balance_exceeds_u64() { #[test] fn test_into_substrate_balance_precision_loss() { // EVM balance with precision loss - let evm_balance = U256::from(1_000_000_000_123_456_789u128); // 1 TAO + extra precision in EVM - let expected_substrate_balance = U256::from(1_000_000_000u128); // Truncated to 1 TAO in Substrate + let evm_balance = EvmBalance::new(U256::from(1_000_000_000_123_456_789u128)); // 1 TAO + extra precision in EVM + let expected_substrate_balance = SubstrateBalance::new(U256::from(1_000_000_000u128)); // Truncated to 1 TAO in Substrate let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2296,8 +2319,8 @@ fn test_into_substrate_balance_precision_loss() { #[test] fn test_into_substrate_balance_zero_value() { // Zero balance should convert to zero - let evm_balance = U256::from(0); - let expected_substrate_balance = U256::from(0); + let evm_balance = EvmBalance::new(U256::from(0)); + let expected_substrate_balance = SubstrateBalance::new(U256::from(0)); let result = SubtensorEvmBalanceConverter::into_substrate_balance(evm_balance); assert_eq!(result, Some(expected_substrate_balance)); @@ -2306,8 +2329,8 @@ fn test_into_substrate_balance_zero_value() { #[test] fn test_into_evm_balance_valid() { // Valid conversion from Substrate to EVM - let substrate_balance = U256::from(1_000_000_000u128); // 1 TAO in Substrate - let expected_evm_balance = U256::from(1_000_000_000_000_000_000u128); // 1 TAO in EVM + let substrate_balance: SubstrateBalance = 1_000_000_000u128.into(); // 1 TAO in Substrate + 
let expected_evm_balance = EvmBalance::new(U256::from(1_000_000_000_000_000_000u128)); // 1 TAO in EVM let result = SubtensorEvmBalanceConverter::into_evm_balance(substrate_balance); assert_eq!(result, Some(expected_evm_balance)); @@ -2316,8 +2339,9 @@ fn test_into_evm_balance_valid() { #[test] fn test_into_evm_balance_overflow() { // Substrate balance larger than u64::MAX but valid within U256 - let substrate_balance = U256::from(u64::MAX) + U256::from(1); // Large balance - let expected_evm_balance = substrate_balance * U256::from(EVM_TO_SUBSTRATE_DECIMALS); + let substrate_balance = SubstrateBalance::new(U256::from(u64::MAX) + U256::from(1)); // Large balance + let expected_evm_balance = + EvmBalance::new(substrate_balance.into_u256() * U256::from(EVM_TO_SUBSTRATE_DECIMALS)); let result = SubtensorEvmBalanceConverter::into_evm_balance(substrate_balance); assert_eq!(result, Some(expected_evm_balance)); // Should return the scaled value diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 14ade547ea..4a1c99a62c 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -e -EXTRINSIC="${1:-benchmark_register}" +EXTRINSIC="${1:-register}" cargo build \ --profile production \ diff --git a/scripts/benchmark_action.sh b/scripts/benchmark_action.sh index 9989f66c52..34043957de 100755 --- a/scripts/benchmark_action.sh +++ b/scripts/benchmark_action.sh @@ -1,167 +1,288 @@ #!/usr/bin/env bash set -euo pipefail +# A list of pallets we wish to benchmark +PALLETS=(subtensor admin_utils commitments drand) + +# Map of pallet -> dispatch path (relative to this script's directory) +declare -A DISPATCH_PATHS=( + [subtensor]="../pallets/subtensor/src/macros/dispatches.rs" + [admin_utils]="../pallets/admin-utils/src/lib.rs" + [commitments]="../pallets/commitments/src/lib.rs" + [drand]="../pallets/drand/src/lib.rs" +) + # Max allowed drift (%) -THRESHOLD=10 +THRESHOLD=15 +MAX_RETRIES=3 -# Resolve script paths +# We'll build once for 
runtime-benchmarks SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -DISPATCH="$SCRIPT_DIR/../pallets/subtensor/src/macros/dispatches.rs" -RUNTIME_WASM="./target/production/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm" - -# Sanity check -if [[ ! -f "$DISPATCH" ]]; then - echo "❌ ERROR: dispatches.rs not found at $DISPATCH" - exit 1 -fi +RUNTIME_WASM="$SCRIPT_DIR/../target/production/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm" echo "Building runtime-benchmarks…" cargo build --profile production -p node-subtensor --features runtime-benchmarks echo echo "──────────────────────────────────────────" -echo " Running pallet_subtensor benchmarks…" +echo " Will benchmark pallets: ${PALLETS[*]}" echo "──────────────────────────────────────────" -MAX_RETRIES=3 -attempt=1 - -while (( attempt <= MAX_RETRIES )); do - echo - echo "Attempt #$attempt" - echo "──────────────────────────────────────────" - - # run benchmarks and capture output - TMP="$(mktemp)" - trap "rm -f \"$TMP\"" EXIT - ./target/production/node-subtensor benchmark pallet \ - --runtime "$RUNTIME_WASM" \ - --genesis-builder=runtime \ - --genesis-builder-preset=benchmark \ - --wasm-execution=compiled \ - --pallet pallet_subtensor \ - --extrinsic "*" \ - --steps 50 \ - --repeat 5 \ - | tee "$TMP" - - # reset counters - declare -a summary_lines=() - declare -a failures=() - fail=0 - extr="" - - # parse output - while IFS= read -r line; do - if [[ $line =~ Extrinsic:\ \"benchmark_([[:alnum:]_]+)\" ]]; then - extr="${BASH_REMATCH[1]}" - continue - fi +################################################################################ +# Helper to "finalize" an extrinsic. We look up code-side reads/writes/weight +# in the dispatch file, then compare them to measured values. 
+################################################################################ + +function process_extr() { + local e="$1" + local us="$2" + local rd="$3" + local wr="$4" + local dispatch_file="$5" - if [[ $line =~ Time\ ~=\ *([0-9]+(\.[0-9]+)?) ]]; then - [[ -z "$extr" ]] && continue + # If any piece is empty, skip + if [[ -z "$e" || -z "$us" || -z "$rd" || -z "$wr" ]]; then + return + fi - meas_us="${BASH_REMATCH[1]}" - meas_ps=$(awk -v u="$meas_us" 'BEGIN{printf("%.0f", u * 1000000)}') + # Convert microseconds to picoseconds + local meas_ps + meas_ps=$(awk -v x="$us" 'BEGIN{printf("%.0f", x * 1000000)}') - # grab reads & writes - meas_reads="" meas_writes="" - while IFS= read -r sub; do - [[ $sub =~ Reads[[:space:]]*=[[:space:]]*([0-9]+) ]] && meas_reads="${BASH_REMATCH[1]}" && continue - [[ $sub =~ Writes[[:space:]]*=[[:space:]]*([0-9]+) ]] && meas_writes="${BASH_REMATCH[1]}" && break - done + # --------------------------------------------------------------------------- + # Code-side lookup from dispatch_file + # --------------------------------------------------------------------------- + local code_record + code_record=$(awk -v extr="$e" ' + /^\s*#\[pallet::call_index\(/ { next } - # extract code-side values - code_record=$( - awk -v extr="$extr" ' - /^\s*#\[pallet::call_index\(/ { next } - /Weight::from_parts/ { - lw=$0; sub(/.*Weight::from_parts\(\s*/, "", lw); - sub(/[^0-9_].*$/, "", lw); gsub(/_/, "", lw); - w=lw - } - /reads_writes\(/ { - lw=$0; sub(/.*reads_writes\(/, "", lw); - sub(/\).*/, "", lw); - split(lw,io,/,/); - gsub(/^[ \t]+|[ \t]+$/, "", io[1]); - gsub(/^[ \t]+|[ \t]+$/, "", io[2]); - r=io[1]; wri=io[2]; next - } - /\.reads\(/ { - lw=$0; sub(/.*\.reads\(/, "", lw); - sub(/\).*/, "", lw); - r=lw; next - } - /\.writes\(/ { - lw=$0; sub(/.*\.writes\(/, "", lw); - sub(/\).*/, "", lw); - wri=lw; next - } - $0 ~ ("pub fn[[:space:]]+" extr "\\(") { print w, r, wri; exit } - ' "$DISPATCH" - ) - read code_w code_reads code_writes <<<"$code_record" 
- - # strip any non-digit (e.g. "_u64") so math works - code_w=${code_w//_/} - code_w=${code_w%%[^0-9]*} - code_reads=${code_reads//_/} - code_reads=${code_reads%%[^0-9]*} - code_writes=${code_writes//_/} - code_writes=${code_writes%%[^0-9]*} - - # compute drift % - drift=$(awk -v a="$meas_ps" -v b="$code_w" 'BEGIN{printf("%.1f", (a-b)/b*100)}') - - summary_lines+=("$(printf "%-30s | reads code=%3s measured=%3s | writes code=%3s measured=%3s | weight code=%12s measured=%12s | drift %6s%%" \ - "$extr" "$code_reads" "$meas_reads" "$code_writes" "$meas_writes" "$code_w" "$meas_ps" "$drift")") - - # validations - [[ -z "$code_w" ]] && failures+=("[${extr}] missing code weight") && fail=1 - [[ -z "$meas_reads" ]] && failures+=("[${extr}] missing measured reads") && fail=1 - [[ -z "$meas_writes" ]] && failures+=("[${extr}] missing measured writes") && fail=1 - (( meas_reads != code_reads )) && failures+=("[${extr}] reads mismatch code=${code_reads}, measured=${meas_reads}") && fail=1 - (( meas_writes != code_writes )) && failures+=("[${extr}] writes mismatch code=${code_writes}, measured=${meas_writes}") && fail=1 - [[ "$code_w" == "0" ]] && failures+=("[${extr}] zero code weight") && fail=1 - - abs_drift=${drift#-} - drift_int=${abs_drift%%.*} - if (( drift_int > THRESHOLD )); then - failures+=("[${extr}] weight code=${code_w}, measured=${meas_ps}, drift=${drift}%") - fail=1 - fi + /Weight::from_parts/ { + lw = $0 + sub(/.*Weight::from_parts\(\s*/, "", lw) + sub(/[^0-9_].*$/, "", lw) + gsub(/_/, "", lw) + w = lw + } + + /reads_writes\(/ { + lw = $0 + sub(/.*reads_writes\(/, "", lw) + sub(/\).*/, "", lw) + split(lw, io, ",") + gsub(/^[ \t]+|[ \t]+$/, "", io[1]) + gsub(/^[ \t]+|[ \t]+$/, "", io[2]) + r = io[1] + wri = io[2] + next + } + + /\.reads\(/ { + lw = $0 + sub(/.*\.reads\(/, "", lw) + sub(/\).*/, "", lw) + r = lw + next + } + + /\.writes\(/ { + lw = $0 + sub(/.*\.writes\(/, "", lw) + sub(/\).*/, "", lw) + wri = lw + next + } + + # main condition: function name 
must match "pub fn (" + $0 ~ ("pub fn[[:space:]]+" extr "\\(") { + print w, r, wri + exit + } + ' "$dispatch_file") + + local code_w code_reads code_writes + read code_w code_reads code_writes <<<"$code_record" + + # strip underscores or non-digits + code_w="${code_w//_/}" + code_w="${code_w%%[^0-9]*}" + code_reads="${code_reads//_/}" + code_reads="${code_reads%%[^0-9]*}" + code_writes="${code_writes//_/}" + code_writes="${code_writes%%[^0-9]*}" + + # default them if empty + [[ -z "$code_w" ]] && code_w="0" + [[ -z "$code_reads" ]] && code_reads="0" + [[ -z "$code_writes" ]] && code_writes="0" + + # compute drift + local drift + drift=$(awk -v a="$meas_ps" -v b="$code_w" 'BEGIN { + if (b == "" || b == 0) { + print 99999 + exit + } + printf("%.1f", (a - b) / b * 100) + }') + + # produce summary line + summary_lines+=("$(printf "%-30s | reads code=%3s measured=%3s | writes code=%3s measured=%3s | weight code=%12s measured=%12s | drift %6s%%" \ + "$e" \ + "$code_reads" \ + "$rd" \ + "$code_writes" \ + "$wr" \ + "$code_w" \ + "$meas_ps" \ + "$drift")") + + # validations + if (( rd != code_reads )); then + failures+=("[${e}] reads mismatch code=${code_reads}, measured=${rd}") + fail=1 + fi + if (( wr != code_writes )); then + failures+=("[${e}] writes mismatch code=${code_writes}, measured=${wr}") + fail=1 + fi + if [[ "$code_w" == "0" ]]; then + failures+=("[${e}] zero code weight") + fail=1 + fi + + local abs_drift=${drift#-} + local drift_int=${abs_drift%%.*} + if (( drift_int > THRESHOLD )); then + failures+=("[${e}] weight code=${code_w}, measured=${meas_ps}, drift=${drift}%") + fail=1 + fi +} + +################################################################################ +# We'll do the standard "attempt" logic for each pallet +################################################################################ + +for pallet_name in "${PALLETS[@]}"; do + # ensure the dispatch path is defined + if [[ -z "${DISPATCH_PATHS[$pallet_name]:-}" ]]; then + echo "❌ ERROR: 
dispatch path not defined for pallet '$pallet_name'" + exit 1 + fi + # Prepend $SCRIPT_DIR to the path + DISPATCH="$SCRIPT_DIR/${DISPATCH_PATHS[$pallet_name]}" + if [[ ! -f "$DISPATCH" ]]; then + echo "❌ ERROR: dispatch file not found at $DISPATCH" + exit 1 + fi + + attempt=1 + pallet_success=0 + + while (( attempt <= MAX_RETRIES )); do + echo + echo "══════════════════════════════════════" + echo "Benchmarking pallet: $pallet_name (attempt #$attempt)" + echo "Dispatch file: $DISPATCH" + echo "══════════════════════════════════════" + + TMP="$(mktemp)" + trap "rm -f \"$TMP\"" EXIT + + # Run benchmark for just this pallet + ./target/production/node-subtensor benchmark pallet \ + --runtime "$RUNTIME_WASM" \ + --genesis-builder=runtime \ + --genesis-builder-preset=benchmark \ + --wasm-execution=compiled \ + --pallet "pallet_${pallet_name}" \ + --extrinsic "*" \ + --steps 50 \ + --repeat 5 \ + | tee "$TMP" + + # now parse results + summary_lines=() + failures=() + fail=0 + + extr="" + meas_us="" + meas_reads="" + meas_writes="" + + function finalize_extr() { + process_extr "$extr" "$meas_us" "$meas_reads" "$meas_writes" "$DISPATCH" extr="" - fi - done < "$TMP" + meas_us="" + meas_reads="" + meas_writes="" + } - # summary output - echo - echo "Benchmark Summary for attempt #$attempt:" - for l in "${summary_lines[@]}"; do - echo " $l" - done + while IFS= read -r line; do + if [[ $line =~ Extrinsic:\ \"([[:alnum:]_]+)\" ]]; then + finalize_extr + extr="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Time\ ~=\ *([0-9]+(\.[0-9]+)?) 
]]; then + meas_us="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Reads[[:space:]]*=[[:space:]]*([0-9]+) ]]; then + meas_reads="${BASH_REMATCH[1]}" + continue + fi + + if [[ $line =~ Writes[[:space:]]*=[[:space:]]*([0-9]+) ]]; then + meas_writes="${BASH_REMATCH[1]}" + continue + fi + done < "$TMP" + + finalize_extr - if (( fail )); then echo - echo "❌ Issues detected on attempt #$attempt:" - for e in "${failures[@]}"; do - echo " • $e" + echo "Benchmark Summary for pallet '$pallet_name' (attempt #$attempt):" + for l in "${summary_lines[@]}"; do + echo " $l" done - if (( attempt < MAX_RETRIES )); then - echo "→ Retrying…" - (( attempt++ )) - continue + if (( fail )); then + echo + echo "❌ Issues detected on attempt #$attempt (pallet '$pallet_name'):" + for e in "${failures[@]}"; do + echo " • $e" + done + + if (( attempt < MAX_RETRIES )); then + echo "→ Retrying…" + (( attempt++ )) + continue + else + echo + echo "❌ Benchmarks for pallet '$pallet_name' failed after $MAX_RETRIES attempts." + exit 1 + fi else echo - echo "❌ Benchmarks failed after $MAX_RETRIES attempts." - exit 1 + echo "✅ Pallet '$pallet_name' benchmarks all good within ±${THRESHOLD}% drift." + pallet_success=1 + break fi - else - echo - echo "✅ All benchmarks within ±${THRESHOLD}% drift." - exit 0 + done + + # If we never succeeded for this pallet, exit + if (( pallet_success == 0 )); then + echo "❌ Could not benchmark pallet '$pallet_name' successfully." + exit 1 fi done + +echo +echo "══════════════════════════════════════" +echo "All requested pallets benchmarked successfully!" 
+echo "══════════════════════════════════════" +exit 0 diff --git a/scripts/map_consensus.py b/scripts/map_consensus.py new file mode 100644 index 0000000000..1d09207bf3 --- /dev/null +++ b/scripts/map_consensus.py @@ -0,0 +1,144 @@ +import re +import sys +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.pyplot import cm + + +def extract_data(filepath): + """ + Extracts the emission data from a text file. + + Args: + filepath: Path to the data file. + + Returns: + A list of lists containing the numerical data, or None if an error occurs. + """ + try: + with open(filepath, "r") as f: + content = f.read() + except FileNotFoundError: + print(f"Error: File not found at {filepath}") + return None + + # Regular expression to extract data rows. Matches strings like "[0.51, 1.00, 1.00, ...]" + # Explanation: + # \[ Matches the opening square bracket. + # (?: ... ) Non-capturing group. + # [0-9.]+ Matches one or more digits or decimal points. + # ,\s* Matches a comma followed by zero or more whitespace characters. + # + Matches the previous group (number and comma) one or more times. + # [0-9.]+ Matches the last number in the list. + # \] Matches the closing square bracket. + + list_pattern = ( + r"\[(?:[0-9.]+,\s*)+[0-9.]+\]" + ) # Regular expression to match data rows + matches = re.findall(list_pattern, content) + + if not matches: + print("Error: No matching data found in the file.") + return None + + data = [] + for match in matches: + try: + # Extract numerical values from the matched string. + # 1. match[1:-1]: Removes the square brackets from the beginning and end. + # 2. .split(','): Splits the string into a list of strings at each comma. + # 3. [float(x.strip()) for x in ...]: Converts each string to a float + # after removing leading/trailing whitespace. 
+ + row = [float(x.strip()) for x in match[1:-1].split(",")] + data.append(row) + except ValueError: + print(f"Warning: Skipping invalid data row: {match}") + + return data + + +def visualize_data(emission_data, output_filename="consensus_plot.svg"): + """ + Generates and saves a contour plot of the retention map. + + Args: + emission_data: The extracted emission data. + output_filename: The name of the output SVG file. + """ + major_ratios = {} + avg_weight_devs = {} + + # Process the data to organize it by major stake + for ( + major_stake, + major_weight, + minor_weight, + avg_weight_dev, + major_ratio, + ) in emission_data: + major_stake_str = f"{major_stake:.2f}" + maj_idx, min_idx = int(round(50 * major_weight)), int(round(50 * minor_weight)) + + avg_weight_devs.setdefault(major_stake_str, np.zeros((51, 51))) + avg_weight_devs[major_stake_str][maj_idx][min_idx] = avg_weight_dev + + major_ratios.setdefault(major_stake_str, np.zeros((51, 51))) + major_ratios[major_stake_str][maj_idx][min_idx] = major_ratio + + # Create the meshgrid for the contour plot + x = np.linspace(0, 1, 51) + y = np.linspace(0, 1, 51) + x, y = np.meshgrid(x, y, indexing="ij") + + # Set up the plot + fig = plt.figure(figsize=(6, 6), dpi=70) + ax = fig.gca() + ax.set_xticks(np.arange(0, 1, 0.05)) + ax.set_yticks(np.arange(0, 1.0, 0.05)) + ax.set_xticklabels([f"{_:.2f}"[1:] for _ in np.arange(0, 1.0, 0.05)]) + plt.grid(linestyle="dotted", color=[0.85, 0.85, 0.85]) + + # Define stakes and colors for contour lines + isolate = ["0.60"] # Stakes to highlight + stakes = [0.51, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99] + colors = cm.viridis(np.linspace(0, 1, len(stakes) + 1)) + + # Create contour lines for each stake + for i, stake in enumerate(stakes): + contours = plt.contour( + x, + y, + major_ratios[f"{stake:.2f}"], + levels=[0.0, stake], + colors=[colors[i + 1]], + ) + if f"{stake:.2f}" in isolate: + contours.collections[1].set_linewidth(3) # Highlight isolated stake + 
plt.clabel(contours, inline=True, fontsize=10) + + # Add title and labels + plt.title(f"Major emission [$stake_{{maj}}=emission_{{maj}}$ retention lines]") + plt.ylabel("Minor self-weight") + plt.xlabel("Major self-weight") + + # Save the plot + plt.savefig(output_filename, format="svg") + print(f"Plot saved to {output_filename}") + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print( + "Usage: python scripts/map_consensus.py <input_filepath> [optional_output_filename]" + ) + sys.exit(1) + + filepath = sys.argv[1] + output_filename = "consensus_plot.svg" # Default output filename + if len(sys.argv) >= 3: + output_filename = sys.argv[2] # Optional output filename + + extracted_data = extract_data(filepath) + if extracted_data: + visualize_data(extracted_data, output_filename)