diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml new file mode 100644 index 0000000000..1a574eb1d8 --- /dev/null +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -0,0 +1,292 @@ +name: Bittensor Bittensor E2E Test + +permissions: + pull-requests: write + contents: read + +concurrency: + group: e2e-cli-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + branches: + - devnet + - devnet-ready + - testnet + - testnet-ready + - main + types: [opened, synchronize, reopened, labeled, unlabeled] + +env: + CARGO_TERM_COLOR: always + VERBOSE: ${{ github.event.inputs.verbose }} + +jobs: + apply-label-to-new-pr: + runs-on: ubuntu-latest + if: ${{ github.event.pull_request.draft == false }} + outputs: + should_continue: ${{ steps.check.outputs.should_continue }} + steps: + - name: Check + id: check + run: | + ACTION="${{ github.event.action }}" + if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then + echo "should_continue=true" >> $GITHUB_OUTPUT + else + echo "should_continue=false" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Add label + if: steps.check.outputs.should_continue == 'true' + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + labels: run-bittensor-e2e-tests + + check-label: + needs: apply-label-to-new-pr + runs-on: ubuntu-latest + if: always() + outputs: + run-bittensor-e2e-tests: ${{ steps.get-labels.outputs.run-bittensor-e2e-tests }} + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Get labels from PR + id: get-labels + run: | + LABELS=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + echo "Current labels: $LABELS" + if echo "$LABELS" | grep -q "run-bittensor-e2e-tests"; then + echo "run-bittensor-e2e-tests=true" >> $GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::true" + else + echo "run-bittensor-e2e-tests=false" >> 
$GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::false" + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + find-btcli-e2e-tests: + needs: check-label + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + outputs: + test-files: ${{ steps.get-btcli-tests.outputs.test-files }} + steps: + - name: Research preparation + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/btcli.git + + - name: Checkout + working-directory: ${{ github.workspace }}/btcli + run: git checkout staging + + - name: Install dependencies + run: sudo apt-get install -y jq + + - name: Find e2e test files + id: get-btcli-tests + run: | + test_files=$(find ${{ github.workspace }}/btcli/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + find-sdk-e2e-tests: + needs: check-label + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + outputs: + test-files: ${{ steps.get-sdk-tests.outputs.test-files }} + steps: + - name: Research preparation + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/bittensor.git + + - name: Checkout + working-directory: ${{ github.workspace }}/bittensor + run: git checkout staging + + - name: Install dependencies + run: sudo apt-get install -y jq + + - name: Find e2e test files + id: get-sdk-tests + run: | + test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + build-image-with-current-branch: + needs: check-label + runs-on: SubtensorCI + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker Image + run: docker build -f Dockerfile-localnet -t localnet . + + - name: Save Docker Image as Tar + run: docker save -o subtensor-localnet.tar localnet + + - name: Upload Docker Image as Artifact + uses: actions/upload-artifact@v4 + with: + name: subtensor-localnet + path: subtensor-localnet.tar + + # main btcli job + run-btcli-e2e-tests: + needs: + - check-label + - find-btcli-e2e-tests + - build-image-with-current-branch + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rust-branch: + - stable + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-btcli-e2e-tests.outputs.test-files) }} + + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + + timeout-minutes: 60 + name: "cli: ${{ matrix.test-file }}" + steps: + - name: Check-out repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }} + run: uv venv ${{ github.workspace }}/venv + + - name: Clone Bittensor CLI repo + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/btcli.git + + - name: Setup Bittensor-cli from cloned repo + working-directory: ${{ github.workspace }}/btcli + run: | + source ${{ github.workspace }}/venv/bin/activate + git checkout staging + git fetch origin staging + uv run 
--active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run --active pip install pytest + + - name: Install uv dependencies + working-directory: ${{ github.workspace }}/btcli + run: uv sync --all-extras --dev + + - name: Download Cached Docker Image + uses: actions/download-artifact@v4 + with: + name: subtensor-localnet + + - name: Load Docker Image + run: docker load -i subtensor-localnet.tar + + - name: Run tests + working-directory: ${{ github.workspace }}/btcli + run: | + source ${{ github.workspace }}/venv/bin/activate + uv run pytest ${{ matrix.test-file }} -s + + # main sdk job + run-sdk-e2e-tests: + needs: + - check-label + - find-sdk-e2e-tests + - build-image-with-current-branch + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rust-branch: + - stable + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-sdk-e2e-tests.outputs.test-files) }} + + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + + timeout-minutes: 60 + name: "sdk: ${{ matrix.test-file }}" + steps: + - name: Check-out repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }} + run: uv venv ${{ github.workspace }}/venv + + - name: Clone Bittensor SDK repo + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/bittensor.git + + - name: Setup Bittensor SDK from cloned repo + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + git checkout staging + git fetch origin staging + uv run --active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run 
--active pip install pytest + + - name: Install uv dependencies + working-directory: ${{ github.workspace }}/bittensor + run: uv sync --all-extras --dev + + - name: Download Cached Docker Image + uses: actions/download-artifact@v4 + with: + name: subtensor-localnet + + - name: Load Docker Image + run: docker load -i subtensor-localnet.tar + + - name: Run tests + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + uv run pytest ${{ matrix.test-file }} -s \ No newline at end of file diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml new file mode 100644 index 0000000000..c2afccae66 --- /dev/null +++ b/.github/workflows/docker-localnet.yml @@ -0,0 +1,69 @@ +name: Publish Localnet Docker Image + +on: + release: + types: [published] + workflow_dispatch: + inputs: + branch-or-tag: + description: "Branch or tag to use for the Docker image tag and ref to checkout (optional)" + required: false + default: "" + push: + branches: + - devnet-ready + +permissions: + contents: read + packages: write + actions: read + security-events: write + +jobs: + publish: + runs-on: SubtensorCI + + steps: + - name: Determine Docker tag and ref + id: tag + run: | + branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" + echo "Determined branch or tag: $branch_or_tag" + echo "tag=$branch_or_tag" >> $GITHUB_ENV + echo "ref=$branch_or_tag" >> $GITHUB_ENV + + # Check if this is a tagged release (not devnet-ready/devnet/testnet) + if [[ "$branch_or_tag" != "devnet-ready" ]]; then + echo "latest_tag=true" >> $GITHUB_ENV + else + echo "latest_tag=false" >> $GITHUB_ENV + fi + + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.ref }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + 
username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile-localnet + push: true + platforms: linux/amd64,linux/arm64 + tags: | + ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} + ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 904027bde2..3eb52ab86f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,7 +22,7 @@ permissions: security-events: write jobs: - publish: + publish-x86: runs-on: SubtensorCI steps: @@ -47,24 +47,70 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . 
push: true - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64 + tags: | + ghcr.io/${{ github.repository }}:${{ env.tag }} + ${{ env.latest_tag == 'true' && format('ghcr.io/{0}:latest', github.repository) || '' }} + publish-arm: + runs-on: SubtensorCI + + steps: + - name: Determine Docker tag and ref + id: tag + run: | + branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" + echo "Determined branch or tag: $branch_or_tag" + echo "tag=$branch_or_tag" >> $GITHUB_ENV + echo "ref=$branch_or_tag" >> $GITHUB_ENV + + # Check if this is a tagged release (not devnet-ready/devnet/testnet) + if [[ "${{ github.event_name }}" == "release" && "$branch_or_tag" != "devnet-ready" && "$branch_or_tag" != "devnet" && "$branch_or_tag" != "testnet" ]]; then + echo "latest_tag=true" >> $GITHUB_ENV + else + echo "latest_tag=false" >> $GITHUB_ENV + fi + + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.ref }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: . 
+ push: true + platforms: linux/arm64 tags: | ghcr.io/${{ github.repository }}:${{ env.tag }} ${{ env.latest_tag == 'true' && format('ghcr.io/{0}:latest', github.repository) || '' }} diff --git a/.github/workflows/e2e-bittensor-tests.yml b/.github/workflows/e2e-bittensor-tests.yml deleted file mode 100644 index 5be78c2ec2..0000000000 --- a/.github/workflows/e2e-bittensor-tests.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: E2E Bittensor Tests - -concurrency: - group: e2e-bittensor-${{ github.ref }} - cancel-in-progress: true - -on: - pull_request: - - ## Allow running workflow manually from the Actions tab - workflow_dispatch: - inputs: - verbose: - description: "Output more information when triggered manually" - required: false - default: "" - -env: - CARGO_TERM_COLOR: always - VERBOSE: ${{ github.events.input.verbose }} - -jobs: - run: - runs-on: SubtensorCI - env: - RUST_BACKTRACE: full - steps: - - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v4 - - - name: Utilize Shared Rust Cache - uses: Swatinem/rust-cache@v2 - - - name: Install dependencies - run: | - sudo apt-get update && - sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler - - - name: Clone bittensor repo - run: git clone https://github.com/opentensor/bittensor.git - - - name: Setup bittensor repo - working-directory: ${{ github.workspace }}/bittensor - run: | - git checkout staging - python3 -m pip install -e . 
- python3 -m pip install torch - python3 -m pip install pytest - python3 -m pip install -r requirements/dev.txt - - - name: Run tests - working-directory: ${{ github.workspace }}/bittensor - run: | - pwd - ls - LOCALNET_SH_PATH="${{ github.workspace }}/scripts/localnet.sh" pytest tests/e2e_tests/ -s diff --git a/.github/workflows/try-runtime.yml b/.github/workflows/try-runtime.yml index c3b54a3514..1241ca94d0 100644 --- a/.github/workflows/try-runtime.yml +++ b/.github/workflows/try-runtime.yml @@ -50,7 +50,7 @@ jobs: check-finney: name: check finney - if: github.base_ref == 'testnet' || github.base_ref == 'devnet' || github.base_ref == 'main' + # if: github.base_ref == 'testnet' || github.base_ref == 'devnet' || github.base_ref == 'main' runs-on: SubtensorCI steps: - name: Checkout sources diff --git a/Cargo.lock b/Cargo.lock index d6ba6b07f6..4f32fb735d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,11 +23,11 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ - "gimli 0.31.1", + "gimli 0.31.0", ] [[package]] @@ -92,7 +92,7 @@ dependencies = [ "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -106,15 +106,15 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.21" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-primitives" -version = "0.8.23" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eacedba97e65cdc7ab592f2b22ef5d3ab8d60b2056bc3a6e6363577e8270ec6f" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", "bytes", @@ -122,15 +122,15 @@ dependencies = [ "const-hex", "derive_more 2.0.1", "foldhash", - "indexmap 2.7.1", + "indexmap 2.6.0", "itoa", "k256", "keccak-asm", "paste", "proptest", - "rand", + "rand 0.8.5", "ruint", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "serde", "sha3", "tiny-keccak", @@ -163,9 +163,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -178,44 +178,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "approx" @@ -237,7 +236,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -620,7 +619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -630,7 +629,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", "rayon", ] @@ -664,7 +663,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.69", + "thiserror", "time", ] @@ -680,7 +679,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.69", + "thiserror", "time", ] @@ -704,7 +703,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", "synstructure 0.13.1", ] @@ -727,7 +726,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -749,9 +748,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ "async-lock", "cfg-if", @@ -760,7 +759,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -772,20 +771,20 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.3.1", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -829,13 +828,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -850,11 +849,11 @@ version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ - "addr2line 0.24.2", + "addr2line 0.24.1", "cfg-if", "libc", "miniz_oxide", - "object 0.36.7", + "object 0.36.4", "rustc-demangle", "windows-targets 0.52.6", ] @@ -916,13 +915,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.29", + 
"prettyplease 0.2.22", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -964,9 +963,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -1025,9 +1024,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.5" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec", @@ -1056,9 +1055,9 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.2.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d077619e9c237a5d1875166f5e8033e8f6bff0c96f8caf81e1c2d7738c431bf" +checksum = "d32385ecb91a31bddaf908e8dcf4a15aef1bcd3913cc03ebfad02ff6d568abc1" dependencies = [ "log", "parity-scale-codec", @@ -1092,9 +1091,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -1110,9 +1109,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = 
"94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" [[package]] name = "byteorder" @@ -1122,9 +1121,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.9.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2-sys" @@ -1158,9 +1157,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -1173,10 +1172,10 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.25", + "semver 1.0.23", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -1187,9 +1186,9 @@ checksum = "fd6c0e7b807d60291f42f33f58480c0bfafe28ed08286446f45e463728cf9c1c" [[package]] name = "cc" -version = "1.2.10" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -1262,9 +1261,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1334,9 +1333,9 @@ dependencies = [ [[package]] 
name = "clap" -version = "4.5.27" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -1344,9 +1343,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -1357,21 +1356,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "codespan-reporting" @@ -1380,14 +1379,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" dependencies = [ "termcolor", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" 
[[package]] name = "combine" @@ -1401,13 +1400,13 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.3" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" +checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "strum 0.26.3", "strum_macros 0.26.4", - "unicode-width 0.2.0", + "unicode-width", ] [[package]] @@ -1427,15 +1426,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.10" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", + "lazy_static", "libc", - "once_cell", - "unicode-width 0.2.0", - "windows-sys 0.59.0", + "unicode-width", + "windows-sys 0.52.0", ] [[package]] @@ -1531,9 +1530,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.17" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1662,9 +1661,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1690,15 +1689,15 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.21" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" @@ -1707,7 +1706,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array 0.14.7", - "rand_core", + "rand_core 0.6.4", "subtle 2.6.1", "zeroize", ] @@ -1719,7 +1718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core", + "rand_core 0.6.4", "typenum 1.17.0", ] @@ -1776,66 +1775,51 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "cxx" -version = "1.0.137" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc894913dccfed0f84106062c284fa021c3ba70cb1d78797d6f5165d4492e45" +checksum = "54ccead7d199d584d139148b04b4a368d1ec7556a1d9ea2548febb1b9d49f9a4" dependencies = [ "cc", - "cxxbridge-cmd", "cxxbridge-flags", "cxxbridge-macro", - "foldhash", "link-cplusplus", ] [[package]] name = "cxx-build" -version = "1.0.137" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503b2bfb6b3e8ce7f95d865a67419451832083d3186958290cee6c53e39dfcfe" +checksum = "c77953e99f01508f89f55c494bfa867171ef3a6c8cea03d26975368f2121a5c1" dependencies = [ "cc", "codespan-reporting", + "once_cell", "proc-macro2", "quote", "scratch", - "syn 2.0.96", -] - 
-[[package]] -name = "cxxbridge-cmd" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d2cb64a95b4b5a381971482235c4db2e0208302a962acdbe314db03cbbe2fb" -dependencies = [ - "clap", - "codespan-reporting", - "proc-macro2", - "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "cxxbridge-flags" -version = "1.0.137" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f797b0206463c9c2a68ed605ab28892cca784f1ef066050f4942e3de26ad885" +checksum = "65777e06cc48f0cb0152024c77d6cf9e4bdb4408e7b48bea993d42fa0f5b02b6" [[package]] name = "cxxbridge-macro" -version = "1.0.137" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79010a2093848e65a3e0f7062d3f02fb2ef27f866416dfe436fccfa73d3bb59" +checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60" dependencies = [ "proc-macro2", "quote", - "rustversion", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -1859,7 +1843,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -1870,7 +1854,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -1888,15 +1872,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.7.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.16" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b16d9d0d88a5273d830dac8b78ceb217ffc9b1d5404e5597a3542515329405b" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ 
"data-encoding", "data-encoding-macro-internal", @@ -1904,12 +1888,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.14" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", - "syn 2.0.96", + "syn 1.0.109", ] [[package]] @@ -1979,7 +1963,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -1992,16 +1976,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.96", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl 1.0.0", + "syn 2.0.90", ] [[package]] @@ -2010,18 +1985,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "derive_more-impl 2.0.1", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", + "derive_more-impl", ] [[package]] @@ -2032,7 +1996,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", "unicode-xid", ] @@ -2122,23 +2086,23 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "docify" -version = 
"0.2.9" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a772b62b1837c8f060432ddcc10b17aae1453ef17617a99bc07789252d2a5896" +checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.9" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60e6be249b0a462a14784a99b19bf35a667bb5e09de611738bb7362fa4c95ff7" +checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", "derive-syn-parse", @@ -2146,7 +2110,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn 2.0.90", "termcolor", "toml 0.8.19", "walkdir", @@ -2172,9 +2136,9 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dyn-clonable" -version = "0.9.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a36efbb9bfd58e1723780aa04b61aba95ace6a05d9ffabfdb0b43672552f0805" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" dependencies = [ "dyn-clonable-impl", "dyn-clone", @@ -2182,13 +2146,13 @@ dependencies = [ [[package]] name = "dyn-clonable-impl" -version = "0.9.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8671d54058979a37a26f3511fbf8d198ba1aa35ffb202c42587d918d77213a" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 1.0.109", ] [[package]] @@ -2230,7 +2194,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "subtle 2.6.1", @@ -2247,7 +2211,7 @@ dependencies = [ "ed25519", "hashbrown 0.14.5", "hex", - "rand_core", + "rand_core 0.6.4", "sha2 
0.10.8", "zeroize", ] @@ -2274,7 +2238,7 @@ dependencies = [ "generic-array 0.14.7", "group", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "serdect", "subtle 2.6.1", @@ -2283,9 +2247,9 @@ dependencies = [ [[package]] name = "encode_unicode" -version = "1.0.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "enum-as-inner" @@ -2308,27 +2272,27 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "enumflags2" -version = "0.7.11" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2f4b465f5318854c6f8dd686ede6c0a9dc67d4b1ac241cf0eb51521a309147" +checksum = "d232db7f5956f3f14313dc2f87985c58bd2c695ce124c8cdd984e08e15ac133d" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.7.11" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4caf64a58d7a6d65ab00639b046ff54399a39f5f2554728895ace4b297cd79" +checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -2358,12 +2322,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2374,9 +2338,9 @@ checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", 
"fixed-hash", - "impl-codec 0.6.0", + "impl-codec", "impl-rlp", - "impl-serde 0.4.0", + "impl-serde", "scale-info", "tiny-keccak", ] @@ -2407,12 +2371,12 @@ checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ "ethbloom", "fixed-hash", - "impl-codec 0.6.0", + "impl-codec", "impl-rlp", - "impl-serde 0.4.0", - "primitive-types 0.12.2", + "impl-serde", + "primitive-types", "scale-info", - "uint 0.9.5", + "uint", ] [[package]] @@ -2423,9 +2387,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -2434,11 +2398,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -2456,7 +2420,7 @@ dependencies = [ "evm-runtime", "log", "parity-scale-codec", - "primitive-types 0.12.2", + "primitive-types", "rlp", "scale-info", "serde", @@ -2470,7 +2434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1da6cedc5cedb4208e59467106db0d1f50db01b920920589f8e672c02fdc04f" dependencies = [ "parity-scale-codec", - "primitive-types 0.12.2", + "primitive-types", "scale-info", "serde", ] @@ -2484,7 +2448,7 @@ dependencies = [ "environmental", "evm-core", "evm-runtime", - "primitive-types 0.12.2", + "primitive-types", ] [[package]] @@ -2496,7 +2460,7 @@ dependencies = [ "auto_impl", 
"environmental", "evm-core", - "primitive-types 0.12.2", + "primitive-types", "sha3", ] @@ -2518,10 +2482,10 @@ dependencies = [ "blake2 0.10.6", "file-guard", "fs-err", - "prettyplease 0.2.29", + "prettyplease 0.2.22", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -2538,9 +2502,9 @@ checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fastrand" -version = "2.3.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fastrlp" @@ -2589,7 +2553,7 @@ dependencies = [ "sp-block-builder", "sp-consensus", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -2668,7 +2632,7 @@ dependencies = [ "pallet-evm", "parity-scale-codec", "prometheus", - "rand", + "rand 0.8.5", "rlp", "sc-client-api", "sc-consensus-aura", @@ -2695,7 +2659,7 @@ dependencies = [ "sp-storage 21.0.0", "sp-timestamp", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", "tokio", ] @@ -2738,7 +2702,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -2747,7 +2711,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle 2.6.1", ] @@ -2812,7 +2776,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2851,9 +2815,9 @@ checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "foreign-types" @@ -2894,7 +2858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" dependencies = [ "nonempty", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -2903,7 +2867,7 @@ version = "1.0.0-dev" source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac882333afed827053f31ef56ab739f7a2e" dependencies = [ "hex", - "impl-serde 0.4.0", + "impl-serde", "libsecp256k1", "log", "parity-scale-codec", @@ -3041,7 +3005,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "rand", + "rand 0.8.5", "rand_pcg", "sc-block-builder", "sc-chain-spec", @@ -3067,7 +3031,7 @@ dependencies = [ "sp-storage 21.0.0", "sp-trie", "sp-wasm-interface 21.0.1", - "thiserror 1.0.69", + "thiserror", "thousands", ] @@ -3174,7 +3138,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3187,7 +3151,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3199,7 +3163,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3210,7 +3174,7 @@ checksum = "68672b9ec6fe72d259d3879dc212c5e42e977588cdac830c76f54d9f492aeb58" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3220,7 +3184,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3375,9 +3339,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", "pin-project-lite", @@ -3391,7 +3355,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -3492,14 +3456,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", ] [[package]] @@ -3508,8 +3472,8 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" dependencies = [ - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", ] [[package]] @@ -3545,15 +3509,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" 
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "governor" @@ -3570,7 +3534,7 @@ dependencies = [ "parking_lot 0.12.3", "portable-atomic", "quanta", - "rand", + "rand 0.8.5", "smallvec", "spinning_top", ] @@ -3582,7 +3546,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core", + "rand_core 0.6.4", "subtle 2.6.1", ] @@ -3598,7 +3562,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.7.1", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3607,17 +3571,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.2.0", - "indexmap 2.7.1", + "http 1.1.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3641,7 +3605,7 @@ dependencies = [ "pest_derive", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -3709,11 +3673,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.10.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.14.5", ] [[package]] @@ -3802,11 +3766,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.11" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = 
"e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3833,9 +3797,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3860,7 +3824,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http 1.1.0", ] [[package]] @@ -3871,16 +3835,16 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.10.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3896,9 +3860,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.32" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -3911,7 +3875,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.8", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3920,15 +3884,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.7", - "http 1.2.0", + "h2 0.4.6", + "http 1.1.0", "http-body 1.0.1", "httparse", "httpdate", @@ -3946,7 +3910,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.32", + "hyper 0.14.30", "log", "rustls 0.21.12", "rustls-native-certs", @@ -3962,9 +3926,9 @@ checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.5.0", "pin-project-lite", "tokio", "tower-service", @@ -3993,124 +3957,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -4140,23 +3986,12 @@ dependencies = [ [[package]] name = "idna" 
-version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", + "unicode-bidi", + "unicode-normalization", ] [[package]] @@ -4171,9 +4006,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ "async-io", "core-foundation", @@ -4182,10 +4017,6 @@ dependencies = [ "if-addrs", "ipnet", "log", - "netlink-packet-core", - "netlink-packet-route", - "netlink-proto", - "netlink-sys", "rtnetlink", "system-configuration", "tokio", @@ -4203,9 +4034,9 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.32", + "hyper 0.14.30", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -4220,26 +4051,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-codec" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67aa010c1e3da95bf151bd8b4c059b2ed7e75387cdb969b4f8f2723a43f9941" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-num-traits" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803d15461ab0dcc56706adf266158acbc44ccf719bf7d0af30705f58b90a4b8c" -dependencies = [ - "integer-sqrt", - "num-traits", - "uint 0.10.0", -] - [[package]] 
name = "impl-rlp" version = "0.3.0" @@ -4258,24 +4069,15 @@ dependencies = [ "serde", ] -[[package]] -name = "impl-serde" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a143eada6a1ec4aefa5049037a26a6d597bfd64f8c026d07b77133e02b7dd0b" -dependencies = [ - "serde", -] - [[package]] name = "impl-trait-for-tuples" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 1.0.109", ] [[package]] @@ -4310,9 +4112,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -4368,7 +4170,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.8", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -4376,19 +4178,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" -version = "0.4.15" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +checksum = 
"261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4426,9 +4228,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" @@ -4441,19 +4243,18 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ - "once_cell", "wasm-bindgen", ] [[package]] name = "jsonrpsee" -version = "0.24.8" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "834af00800e962dee8f7bfc0f60601de215e73e78e5497d733a2919da837d3c8" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ "jsonrpsee-core", "jsonrpsee-proc-macros", @@ -4465,51 +4266,51 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.8" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76637f6294b04e747d68e69336ef839a3493ca62b35bf488ead525f7da75c5bb" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "http-body-util", "jsonrpsee-types", "parking_lot 0.12.3", - "rand", - "rustc-hash 2.1.0", + "rand 0.8.5", + "rustc-hash 2.1.1", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", "tokio", "tracing", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.8" +version 
= "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcae0c6c159e11541080f1f829873d8f374f81eda0abc67695a13fc8dc1a580" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "jsonrpsee-server" -version = "0.24.8" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66b7a3df90a1a60c3ed68e7ca63916b53e9afa928e33531e87f61a9c8e9ae87b" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.5.0", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", @@ -4518,7 +4319,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror 1.0.69", + "thiserror", "tokio", "tokio-stream", "tokio-util", @@ -4528,14 +4329,14 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.8" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddb81adb1a5ae9182df379e374a79e24e992334e7346af4d065ae5b2acb8d4c6" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ - "http 1.2.0", + "http 1.1.0", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -4624,15 +4425,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4640,9 +4441,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.11" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" @@ -4678,7 +4479,7 @@ dependencies = [ "multiaddr 0.18.2", "pin-project", "rw-stream-sink", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -4719,16 +4520,16 @@ dependencies = [ "libp2p-identity", "log", "multiaddr 0.18.2", - "multihash 0.19.3", + "multihash 0.19.2", "multistream-select", "once_cell", "parking_lot 0.12.3", "pin-project", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", "smallvec", - "thiserror 1.0.69", + "thiserror", "unsigned-varint 0.7.2", "void", ] @@ -4768,24 +4569,24 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 1.0.69", + "thiserror", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.10" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "multihash 0.19.3", + "multihash 0.19.2", "quick-protobuf", - "rand", + "rand 0.8.5", "sha2 0.10.8", - "thiserror 1.0.69", + "thiserror", "tracing", "zeroize", ] @@ -4810,11 +4611,11 @@ dependencies = [ "log", "quick-protobuf", "quick-protobuf-codec", - "rand", + "rand 0.8.5", "sha2 0.10.8", "smallvec", - "thiserror 1.0.69", - "uint 0.9.5", + "thiserror", + "uint", "unsigned-varint 0.7.2", "void", ] @@ -4832,9 +4633,9 @@ dependencies = [ 
"libp2p-identity", "libp2p-swarm", "log", - "rand", + "rand 0.8.5", "smallvec", - "socket2 0.5.8", + "socket2 0.5.7", "tokio", "trust-dns-proto 0.22.0", "void", @@ -4870,14 +4671,14 @@ dependencies = [ "libp2p-identity", "log", "multiaddr 0.18.2", - "multihash 0.19.3", + "multihash 0.19.2", "once_cell", "quick-protobuf", - "rand", + "rand 0.8.5", "sha2 0.10.8", "snow", "static_assertions", - "thiserror 1.0.69", + "thiserror", "x25519-dalek", "zeroize", ] @@ -4896,7 +4697,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand", + "rand 0.8.5", "void", ] @@ -4916,11 +4717,11 @@ dependencies = [ "log", "parking_lot 0.12.3", "quinn 0.10.2", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustls 0.21.12", - "socket2 0.5.8", - "thiserror 1.0.69", + "socket2 0.5.7", + "thiserror", "tokio", ] @@ -4937,7 +4738,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand", + "rand 0.8.5", "smallvec", "void", ] @@ -4959,7 +4760,7 @@ dependencies = [ "log", "multistream-select", "once_cell", - "rand", + "rand 0.8.5", "smallvec", "tokio", "void", @@ -4975,7 +4776,7 @@ dependencies = [ "proc-macro-warning 0.4.2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -4991,7 +4792,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.8", + "socket2 0.5.7", "tokio", ] @@ -5009,7 +4810,7 @@ dependencies = [ "ring 0.16.20", "rustls 0.21.12", "rustls-webpki", - "thiserror 1.0.69", + "thiserror", "x509-parser 0.15.1", "yasna", ] @@ -5060,7 +4861,7 @@ dependencies = [ "pin-project-lite", "rw-stream-sink", "soketto", - "thiserror 1.0.69", + "thiserror", "url", "webpki-roots", ] @@ -5074,7 +4875,7 @@ dependencies = [ "futures", "libp2p-core", "log", - "thiserror 1.0.69", + "thiserror", "yamux", ] @@ -5084,9 +4885,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.8.0", + 
"bitflags 2.6.0", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.7", ] [[package]] @@ -5106,18 +4907,18 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +checksum = "e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" dependencies = [ "arrayref", - "base64 0.13.1", + "base64 0.22.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum 1.17.0", @@ -5165,9 +4966,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -5191,18 +4992,18 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linked_hash_set" -version = "0.1.5" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae85b5be22d9843c80e5fc80e9b64c8a3b1f98f867c709956eca3efff4e92e2" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", ] [[package]] name = "linregress" -version = "0.5.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9eda9dcf4f2a99787827661f312ac3219292549c2ee992bf9a6248ffb066bf7" +checksum = "4de04dcecc58d366391f9920245b85ffa684558a5ef6e7736e754347c3aea9c2" dependencies = [ "nalgebra", ] @@ -5215,9 +5016,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.4.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lioness" @@ -5231,12 +5032,6 @@ dependencies = [ "keystream", ] -[[package]] -name = "litemap" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" - [[package]] name = "litep2p" version = "0.6.2" @@ -5251,7 +5046,7 @@ dependencies = [ "futures", "futures-timer", "hex-literal", - "indexmap 2.7.1", + "indexmap 2.6.0", "libc", "mockall 0.12.1", "multiaddr 0.17.1", @@ -5263,7 +5058,7 @@ dependencies = [ "prost 0.12.6", "prost-build 0.11.9", "quinn 0.9.4", - "rand", + "rand 0.8.5", "rcgen", "ring 0.16.20", "rustls 0.20.9", @@ -5272,17 +5067,17 @@ dependencies = [ "simple-dns", "smallvec", "snow", - "socket2 0.5.8", + "socket2 0.5.7", "static_assertions", "str0m", - "thiserror 1.0.69", + "thiserror", "tokio", "tokio-stream", "tokio-tungstenite", "tokio-util", "tracing", "trust-dns-resolver", - "uint 0.9.5", + "uint", "unsigned-varint 0.8.0", "url", "webpki", @@ -5304,9 +5099,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" @@ -5337,9 +5132,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.28.1" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20b523e860d03443e98350ceaac5e71c6ba89aea7d960769ec3ce37f4de5af4" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ "lz4-sys", ] @@ -5372,7 +5167,7 @@ dependencies = [ "macro_magic_core", 
"macro_magic_macros", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -5386,7 +5181,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -5397,7 +5192,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -5408,7 +5203,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -5454,7 +5249,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.38.44", + "rustix 0.38.37", ] [[package]] @@ -5515,7 +5310,7 @@ checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", "keccak", - "rand_core", + "rand_core 0.6.4", "zeroize", ] @@ -5527,19 +5322,20 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi 0.3.9", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", @@ -5562,11 +5358,11 @@ dependencies = [ "lioness", "log", "parking_lot 0.12.3", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", 
"rand_distr", "subtle 2.6.1", - "thiserror 1.0.69", + "thiserror", "zeroize", ] @@ -5596,7 +5392,7 @@ dependencies = [ "fragile", "lazy_static", "mockall_derive 0.12.1", - "predicates 3.1.3", + "predicates 3.1.2", "predicates-tree", ] @@ -5621,7 +5417,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -5654,7 +5450,7 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.3", + "multihash 0.19.2", "percent-encoding", "serde", "static_assertions", @@ -5709,9 +5505,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ "core2", "unsigned-varint 0.8.0", @@ -5737,12 +5533,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" -[[package]] -name = "multimap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" - [[package]] name = "multistream-select" version = "0.13.0" @@ -5759,12 +5549,13 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.33.2" +version = "0.32.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" +checksum = "7b5c17de023a86f59ed79891b2e5d5a94c705dbe904a5b5c9c952ea6221b03e4" dependencies = [ "approx", "matrixmultiply", + "nalgebra-macros", "num-complex", "num-rational", "num-traits", @@ -5772,20 +5563,31 @@ dependencies = [ "typenum 1.17.0", ] +[[package]] +name = "nalgebra-macros" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "names" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] name = "native-tls" -version = "0.2.13" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ "libc", "log", @@ -5813,20 +5615,21 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.7.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" dependencies = [ "anyhow", "byteorder", + "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.17.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -5845,28 +5648,29 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror 1.0.69", + "thiserror", ] [[package]] name = "netlink-proto" -version = "0.11.5" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.11", + 
"thiserror", + "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -5883,15 +5687,15 @@ checksum = "a4a43439bf756eed340bdf8feba761e2d50c7d47175d87545cd5cbe4a137c4d1" dependencies = [ "cc", "libc", - "thiserror 1.0.69", + "thiserror", "winapi", ] [[package]] name = "nix" -version = "0.26.4" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -5983,7 +5787,7 @@ dependencies = [ "subtensor-custom-rpc", "subtensor-custom-rpc-runtime-api", "subtensor-runtime-common", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -6026,7 +5830,7 @@ dependencies = [ "pallet-membership", "pallet-multisig", "pallet-preimage", - "pallet-proxy", + "pallet-proxy 38.0.0", "pallet-registry", "pallet-safe-mode", "pallet-scheduler", @@ -6035,10 +5839,10 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", + "pallet-utility 38.0.0", "parity-scale-codec", "precompile-utils", - "rand_chacha", + "rand_chacha 0.3.1", "scale-info", "serde_json", "sha2 0.10.8", @@ -6229,7 +6033,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -6255,9 +6059,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = 
"084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -6282,9 +6086,12 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +dependencies = [ + "portable-atomic", +] [[package]] name = "opaque-debug" @@ -6304,7 +6111,7 @@ version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -6321,20 +6128,20 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "openssl-probe" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.4.1+3.4.0" +version = "300.4.0+3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" +checksum = "a709e02f2b4aca747929cca5ed248880847c650233cf8b8cdc48f40aaf4898a6" dependencies = [ "cc", ] @@ -6471,18 +6278,27 @@ dependencies = [ name = "pallet-commitments" version = "4.0.0-dev" dependencies = [ + "ark-serialize 0.4.2", "enumflags2", "frame-benchmarking", "frame-support", "frame-system", + "hex", + "log", "pallet-balances", + "pallet-drand", + "pallet-subtensor", "parity-scale-codec", + "rand_chacha 0.3.1", "scale-info", + "sha2 0.10.8", "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "subtensor-macros", + "tle", + "w3f-bls", ] [[package]] @@ -6699,6 +6515,23 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "pallet-proxy" +version = "38.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-utility 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "subtensor-macros", +] + [[package]] name = "pallet-proxy" version = "38.0.0" @@ -6730,6 +6563,20 @@ dependencies = [ "subtensor-macros", ] +[[package]] +name = "pallet-root-testing" +version = "14.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", +] + [[package]] name = "pallet-safe-mode" version = "19.0.0" @@ -6740,8 +6587,8 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-proxy", - "pallet-utility", + "pallet-proxy 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "pallet-utility 38.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -6798,6 +6645,7 @@ dependencies = [ "frame-system", "hex", "hex-literal", + "libsecp256k1", "log", "ndarray", "num-traits", @@ -6807,12 +6655,13 @@ dependencies = [ "pallet-membership", "pallet-preimage", "pallet-scheduler", + "pallet-subtensor-swap", "pallet-transaction-payment", - "pallet-utility", + "pallet-utility 38.0.0", "parity-scale-codec", "parity-util-mem", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "safe-math", "scale-info", "serde", @@ -6830,6 +6679,7 @@ dependencies = [ "sp-version", "substrate-fixed", "subtensor-macros", + "subtensor-swap-interface", "tle", "w3f-bls", ] @@ -6840,9 
+6690,9 @@ version = "0.1.0" dependencies = [ "alloy-primitives", "approx", + "frame-benchmarking", "frame-support", "frame-system", - "pallet-subtensor-swap-interface", "parity-scale-codec", "safe-math", "scale-info", @@ -6853,13 +6703,9 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "substrate-fixed", - "uuid", + "subtensor-swap-interface", ] -[[package]] -name = "pallet-subtensor-swap-interface" -version = "0.1.0" - [[package]] name = "pallet-sudo" version = "38.0.0" @@ -6937,6 +6783,25 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "pallet-utility" +version = "38.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-collective", + "pallet-root-testing", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "subtensor-macros", +] + [[package]] name = "pallet-utility" version = "38.0.0" @@ -6959,8 +6824,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -6980,7 +6845,7 @@ dependencies = [ "lz4", "memmap2 0.5.10", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "siphasher", "snap", "winapi", @@ -7026,7 +6891,7 @@ dependencies = [ "lru 0.8.1", "parity-util-mem-derive", "parking_lot 0.12.3", - "primitive-types 0.12.2", + "primitive-types", "smallvec", "winapi", ] @@ -7097,7 +6962,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -7115,7 +6980,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle 2.6.1", ] @@ -7158,20 +7023,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.15" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", - "thiserror 2.0.11", + "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.15" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -7179,22 +7044,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.15" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "pest_meta" -version = "2.7.15" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", @@ -7208,34 +7073,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.7.1", + "indexmap 2.6.0", ] [[package]] name = "pin-project" 
-version = "1.1.8" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -7268,7 +7133,7 @@ dependencies = [ "libc", "log", "polkavm-assembler", - "polkavm-common 0.9.0", + "polkavm-common", "polkavm-linux-raw", ] @@ -7290,28 +7155,13 @@ dependencies = [ "log", ] -[[package]] -name = "polkavm-common" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ff33982a807d8567645d4784b9b5d7ab87bcb494f534a57cadd9012688e102" - -[[package]] -name = "polkavm-derive" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8c4bea6f3e11cd89bb18bcdddac10bd9a24015399bd1c485ad68a985a19606" -dependencies = [ - "polkavm-derive-impl-macro 0.9.0", -] - [[package]] name = "polkavm-derive" -version = "0.18.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2eb703f3b6404c13228402e98a5eae063fd16b8f58afe334073ec105ee4117e" +checksum = "ae8c4bea6f3e11cd89bb18bcdddac10bd9a24015399bd1c485ad68a985a19606" dependencies = [ - 
"polkavm-derive-impl-macro 0.18.0", + "polkavm-derive-impl-macro", ] [[package]] @@ -7320,22 +7170,10 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ - "polkavm-common 0.9.0", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "polkavm-derive-impl" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f2116a92e6e96220a398930f4c8a6cda1264206f3e2034fc9982bfd93f261f7" -dependencies = [ - "polkavm-common 0.18.0", + "polkavm-common", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7344,18 +7182,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ - "polkavm-derive-impl 0.9.0", - "syn 2.0.96", -] - -[[package]] -name = "polkavm-derive-impl-macro" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" -dependencies = [ - "polkavm-derive-impl 0.18.1", - "syn 2.0.96", + "polkavm-derive-impl", + "syn 2.0.90", ] [[package]] @@ -7368,7 +7196,7 @@ dependencies = [ "hashbrown 0.14.5", "log", "object 0.32.2", - "polkavm-common 0.9.0", + "polkavm-common", "regalloc2 0.9.3", "rustc-demangle", ] @@ -7381,15 +7209,15 @@ checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" [[package]] name = "polling" -version = "3.7.4" +version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.44", + "rustix 0.38.37", "tracing", "windows-sys 
0.59.0", ] @@ -7419,9 +7247,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.10.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -7435,7 +7263,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -7469,7 +7297,7 @@ source = "git+https://github.com/opentensor/frontier?rev=635bdac882#635bdac88233 dependencies = [ "case", "num_enum", - "prettyplease 0.2.29", + "prettyplease 0.2.22", "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", @@ -7492,9 +7320,9 @@ dependencies = [ [[package]] name = "predicates" -version = "3.1.3" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" dependencies = [ "anstyle", "predicates-core", @@ -7502,15 +7330,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.9" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" [[package]] name = "predicates-tree" -version = "1.0.12" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" dependencies = [ 
"predicates-core", "termtree", @@ -7528,12 +7356,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.29" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7543,23 +7371,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", - "impl-codec 0.6.0", + "impl-codec", "impl-rlp", - "impl-serde 0.4.0", + "impl-serde", "scale-info", - "uint 0.9.5", -] - -[[package]] -name = "primitive-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" -dependencies = [ - "fixed-hash", - "impl-codec 0.7.0", - "impl-num-traits", - "uint 0.10.0", + "uint", ] [[package]] @@ -7568,7 +7384,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "thiserror 1.0.69", + "thiserror", "toml 0.5.11", ] @@ -7613,7 +7429,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7624,14 +7440,14 @@ checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = 
"37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -7652,7 +7468,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7666,7 +7482,7 @@ dependencies = [ "lazy_static", "memchr", "parking_lot 0.12.3", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -7689,7 +7505,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7700,11 +7516,11 @@ checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.8.0", + "bitflags 2.6.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -7743,7 +7559,7 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "log", - "multimap 0.8.3", + "multimap", "petgraph", "prettyplease 0.1.25", "prost 0.11.9", @@ -7764,14 +7580,14 @@ dependencies = [ "heck 0.5.0", "itertools 0.12.1", "log", - "multimap 0.10.0", + "multimap", "once_cell", "petgraph", - "prettyplease 0.2.29", + "prettyplease 0.2.22", "prost 0.12.6", "prost-types 0.12.6", "regex", - "syn 2.0.96", + "syn 2.0.90", "tempfile", ] @@ -7798,7 +7614,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -7821,18 +7637,18 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.24" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200b9ff220857e53e184257720a14553b2f4aa02577d2ed9842d45d4b9654810" +checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205" dependencies = [ "cc", ] [[package]] name = "quanta" -version = "0.12.5" +version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" dependencies = [ "crossbeam-utils", "libc", @@ -7867,7 +7683,7 @@ dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", - "thiserror 1.0.69", + "thiserror", "unsigned-varint 0.7.2", ] @@ -7883,7 +7699,7 @@ dependencies = [ "quinn-udp 0.3.2", "rustc-hash 1.1.0", "rustls 0.20.9", - "thiserror 1.0.69", + "thiserror", "tokio", "tracing", "webpki", @@ -7902,7 +7718,7 @@ dependencies = [ "quinn-udp 0.4.1", "rustc-hash 1.1.0", "rustls 0.21.12", - "thiserror 1.0.69", + "thiserror", "tokio", "tracing", ] @@ -7914,12 +7730,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustc-hash 1.1.0", "rustls 0.20.9", "slab", - "thiserror 1.0.69", + "thiserror", "tinyvec", "tracing", "webpki", @@ -7932,12 +7748,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustc-hash 1.1.0", "rustls 0.21.12", "slab", - "thiserror 1.0.69", + "thiserror", "tinyvec", "tracing", ] @@ -7963,20 +7779,26 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.8", + "socket2 0.5.7", "tracing", "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.38" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "radium" version = "0.7.0" @@ -7990,8 +7812,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy 0.8.24", ] [[package]] @@ -8001,7 +7834,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -8013,6 +7856,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + [[package]] name = "rand_distr" version = "0.4.3" @@ -8020,7 +7872,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -8029,7 +7881,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -8038,16 +7890,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] name = "raw-cpuid" -version = "11.3.0" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", ] [[package]] @@ -8099,11 +7951,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", ] [[package]] @@ -8114,7 +7966,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8134,7 +7986,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -8164,13 +8016,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", + "regex-automata 0.4.8", "regex-syntax 0.8.5", ] @@ -8185,9 
+8037,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -8243,15 +8095,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -8316,19 +8167,16 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.13.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" dependencies = [ "futures", "log", - "netlink-packet-core", "netlink-packet-route", - "netlink-packet-utils", "netlink-proto", - "netlink-sys", "nix", - "thiserror 1.0.69", + "thiserror", "tokio", ] @@ -8344,9 +8192,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "825df406ec217a8116bd7b06897c6cc8f65ffefc15d030ae2c9540acc9ed50b6" +checksum = "78a46eb779843b2c4f21fac5773e25d6d5b7c8f0922876c91541790d2ca27eef" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -8358,9 +8206,10 @@ dependencies = [ "num-integer", "num-traits", "parity-scale-codec", - "primitive-types 0.12.2", + "primitive-types", "proptest", - "rand", + "rand 0.8.5", + "rand 0.9.0", "rlp", "ruint-macro", "serde", @@ -8388,9 +8237,9 @@ checksum = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc-hex" @@ -8422,7 +8271,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.25", + "semver 1.0.23", ] [[package]] @@ -8450,15 +8299,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.44" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "linux-raw-sys 0.4.14", + "windows-sys 0.52.0", ] [[package]] @@ -8479,7 +8328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.8", + "ring 0.17.13", "rustls-webpki", "sct", ] @@ -8511,15 +8360,15 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.8", + "ring 0.17.13", "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = 
"rusty-fork" @@ -8546,9 +8395,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe-math" @@ -8571,9 +8420,9 @@ dependencies = [ [[package]] name = "safe_arch" -version = "0.7.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" +checksum = "c3460605018fdc9612bce72735cba0d27efbcd9904780d44c7e3a9948f96148a" dependencies = [ "bytemuck", ] @@ -8595,7 +8444,7 @@ dependencies = [ "log", "sp-core", "sp-wasm-interface 21.0.1", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8670,7 +8519,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -8689,7 +8538,7 @@ dependencies = [ "names", "parity-bip39", "parity-scale-codec", - "rand", + "rand 0.8.5", "regex", "rpassword", "sc-client-api", @@ -8710,7 +8559,7 @@ dependencies = [ "sp-panic-handler", "sp-runtime", "sp-version", - "thiserror 1.0.69", + "thiserror", "tokio", ] @@ -8788,7 +8637,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8817,7 +8666,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8853,7 +8702,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8885,7 +8734,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -8910,7 +8759,7 @@ dependencies = [ "sp-keystore", "sp-runtime", 
"substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8930,7 +8779,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -8965,7 +8814,7 @@ dependencies = [ "sp-runtime", "sp-timestamp", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9023,7 +8872,7 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-wasm-interface 21.0.1", - "thiserror 1.0.69", + "thiserror", "wasm-instrument", ] @@ -9084,7 +8933,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-keystore", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9113,7 +8962,7 @@ dependencies = [ "sp-keystore", "sp-mixnet", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9144,7 +8993,7 @@ dependencies = [ "pin-project", "prost 0.12.6", "prost-build 0.12.6", - "rand", + "rand 0.8.5", "sc-client-api", "sc-network-common", "sc-network-types", @@ -9158,7 +9007,7 @@ dependencies = [ "sp-core", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", "tokio", "tokio-stream", "unsigned-varint 0.7.2", @@ -9222,7 +9071,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9257,7 +9106,7 @@ dependencies = [ "sp-core", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", "tokio", "tokio-stream", ] @@ -9292,9 +9141,9 @@ dependencies = [ "litep2p", "log", "multiaddr 0.18.2", - "multihash 0.19.3", - "rand", - "thiserror 1.0.69", + "multihash 0.19.2", + "rand 0.8.5", + "thiserror", "zeroize", ] @@ -9308,14 +9157,14 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper 0.14.32", + "hyper 0.14.30", "hyper-rustls", "log", "num_cpus", "once_cell", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "sc-client-api", "sc-network", "sc-network-common", @@ -9390,7 +9239,7 @@ dependencies = [ "sp-rpc", 
"sp-runtime", "sp-version", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9402,9 +9251,9 @@ dependencies = [ "forwarded-header-value", "futures", "governor", - "http 1.2.0", + "http 1.1.0", "http-body-util", - "hyper 1.6.0", + "hyper 1.5.0", "ip_network", "jsonrpsee", "log", @@ -9430,7 +9279,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "sc-chain-spec", "sc-client-api", "sc-rpc", @@ -9444,7 +9293,7 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-version", - "thiserror 1.0.69", + "thiserror", "tokio", "tokio-stream", ] @@ -9464,7 +9313,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "sc-chain-spec", "sc-client-api", "sc-client-db", @@ -9507,7 +9356,7 @@ dependencies = [ "static_init", "substrate-prometheus-endpoint", "tempfile", - "thiserror 1.0.69", + "thiserror", "tokio", "tracing", "tracing-futures", @@ -9533,7 +9382,7 @@ dependencies = [ "futures", "libc", "log", - "rand", + "rand 0.8.5", "rand_pcg", "regex", "sc-telemetry", @@ -9556,12 +9405,12 @@ dependencies = [ "log", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "sc-network", "sc-utils", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", "wasm-timer", ] @@ -9588,10 +9437,10 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-tracing 17.0.1", - "thiserror 1.0.69", + "thiserror", "tracing", "tracing-log", - "tracing-subscriber 0.3.19", + "tracing-subscriber 0.3.18", ] [[package]] @@ -9602,7 +9451,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -9629,7 +9478,7 @@ dependencies = [ "sp-tracing 17.0.1", "sp-transaction-pool", "substrate-prometheus-endpoint", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9645,7 +9494,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9688,13 +9537,13 @@ dependencies = [ [[package]] name = 
"scale-info" -version = "2.11.6" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", "cfg-if", - "derive_more 1.0.0", + "derive_more 0.99.18", "parity-scale-codec", "scale-info-derive", "serde", @@ -9702,14 +9551,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.6" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 1.0.109", ] [[package]] @@ -9720,18 +9569,18 @@ checksum = "f0cded6518aa0bd6c1be2b88ac81bf7044992f0f154bfbabd5ad34f43512abcb" [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "schnellru" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356285bbf17bea63d9e52e96bd18f039672ac92b55b8cb997d6162a2a37d1649" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" dependencies = [ "ahash 0.8.11", "cfg-if", @@ -9750,7 +9599,7 @@ dependencies = [ "curve25519-dalek", "getrandom_or_panic", "merlin", - "rand_core", + "rand_core 0.6.4", "serde_bytes", "sha2 0.10.8", "subtle 2.6.1", @@ -9775,7 +9624,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" 
dependencies = [ - "ring 0.17.8", + "ring 0.17.13", "untrusted 0.9.0", ] @@ -9789,9 +9638,9 @@ dependencies = [ "crc", "fxhash", "log", - "rand", + "rand 0.8.5", "slab", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -9842,7 +9691,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -9851,9 +9700,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -9888,9 +9737,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -9918,9 +9767,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.217" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -9955,20 +9804,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = 
"46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -10022,7 +9871,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -10151,14 +10000,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] name = "simba" -version = "0.9.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" +checksum = "061507c94fc6ab4ba1c9a0305018408e312e17c041eb63bef8aa726fa33aceae" dependencies = [ "approx", "num-complex", @@ -10173,7 +10022,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", ] [[package]] @@ -10225,8 +10074,8 @@ dependencies = [ "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek", - "rand_core", - "ring 0.17.8", + "rand_core 0.6.4", + "ring 0.17.13", "rustc_version 0.4.1", "sha2 0.10.8", "subtle 2.6.1", @@ -10244,9 +10093,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ 
"libc", "windows-sys 0.52.0", @@ -10254,17 +10103,17 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", "futures", - "http 1.2.0", + "http 1.1.0", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", ] @@ -10287,7 +10136,7 @@ dependencies = [ "sp-state-machine", "sp-trie", "sp-version", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -10301,7 +10150,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -10364,7 +10213,7 @@ dependencies = [ "sp-database", "sp-runtime", "sp-state-machine", - "thiserror 1.0.69", + "thiserror", "tracing", ] @@ -10380,7 +10229,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -10460,7 +10309,7 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "impl-serde 0.4.0", + "impl-serde", "itertools 0.11.0", "k256", "libsecp256k1", @@ -10470,8 +10319,8 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "paste", - "primitive-types 0.12.2", - "rand", + "primitive-types", + "rand 0.8.5", "scale-info", "schnorrkel", "secp256k1", @@ -10485,7 +10334,7 @@ dependencies = [ "sp-storage 21.0.0", "ss58-registry", "substrate-bip39", - "thiserror 1.0.69", + "thiserror", "tracing", "w3f-bls", "zeroize", @@ -10494,7 +10343,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -10565,7 +10414,7 @@ source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -10584,23 +10433,23 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "environmental", "parity-scale-codec", @@ -10639,7 +10488,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -10653,7 +10502,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive", "rustversion", "secp256k1", "sp-core", @@ -10694,7 +10543,7 @@ name = "sp-maybe-compressed-blob" version = "11.0.0" source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ - "thiserror 1.0.69", + "thiserror", "zstd 0.12.4", ] @@ -10762,7 +10611,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "paste", - "rand", + "rand 0.8.5", "scale-info", "serde", "simple-mermaid", @@ -10778,13 +10627,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.18.0", - "primitive-types 0.13.1", + "polkavm-derive", + "primitive-types", "sp-externalities 0.25.0", "sp-runtime-interface-proc-macro 17.0.0", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", @@ -10802,8 +10651,8 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.9.1", - "primitive-types 0.12.2", + "polkavm-derive", + "primitive-types", "sp-externalities 0.29.0", "sp-runtime-interface-proc-macro 18.0.0", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", @@ -10816,14 +10665,14 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -10836,7 +10685,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -10875,13 +10724,13 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "sp-core", "sp-externalities 0.29.0", "sp-panic-handler", "sp-trie", - "thiserror 1.0.69", + "thiserror", "tracing", "trie-db", ] @@ -10896,7 +10745,7 @@ dependencies = [ "ed25519-dalek", "hkdf", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", "sha2 0.10.8", "sp-api", @@ -10906,7 +10755,7 @@ dependencies = [ "sp-externalities 0.29.0", "sp-runtime", "sp-runtime-interface 28.0.0", - "thiserror 1.0.69", + 
"thiserror", "x25519-dalek", ] @@ -10918,14 +10767,14 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ - "impl-serde 0.5.0", + "impl-serde", "parity-scale-codec", "ref-cast", "serde", @@ -10937,7 +10786,7 @@ name = "sp-storage" version = "21.0.0" source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ - "impl-serde 0.4.0", + "impl-serde", "parity-scale-codec", "ref-cast", "serde", @@ -10953,18 +10802,18 @@ dependencies = [ "parity-scale-codec", "sp-inherents", "sp-runtime", - "thiserror 1.0.69", + "thiserror", ] [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "parity-scale-codec", "tracing", "tracing-core", - "tracing-subscriber 0.3.19", + "tracing-subscriber 0.3.18", ] [[package]] @@ -10975,7 +10824,7 @@ dependencies = [ "parity-scale-codec", "tracing", "tracing-core", - "tracing-subscriber 0.3.19", + "tracing-subscriber 0.3.18", ] [[package]] @@ -11013,12 +10862,12 @@ dependencies = [ "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "scale-info", "schnellru", "sp-core", "sp-externalities 0.29.0", - "thiserror 1.0.69", + "thiserror", "tracing", "trie-db", "trie-root", @@ -11029,7 
+10878,7 @@ name = "sp-version" version = "37.0.0" source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ - "impl-serde 0.4.0", + "impl-serde", "parity-scale-codec", "parity-wasm", "scale-info", @@ -11038,7 +10887,7 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "sp-version-proc-macro", - "thiserror 1.0.69", + "thiserror", ] [[package]] @@ -11049,13 +10898,13 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#80e30ec3cdccae8e9099bd67840ff8737b043496" +source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774ac1da0a422b33f34e2" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11123,11 +10972,21 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" +dependencies = [ + "nom", + "unicode_categories", +] + [[package]] name = "sqlx" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4410e73b3c0d8442c5f99b425d7a435b5ee0ae4167b3196771dd3f7a01be745f" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" dependencies = [ "sqlx-core", "sqlx-macros", @@ -11136,31 +10995,37 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a007b6936676aa9ab40207cde35daab0a04b823be8ae004368c0793b96a61e0" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" dependencies = [ + "atoi", + "byteorder", "bytes", "crc", "crossbeam-queue", 
"either", - "event-listener 5.4.0", + "event-listener 5.3.1", + "futures-channel", "futures-core", "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.15.2", - "hashlink 0.10.0", - "indexmap 2.7.1", + "hashbrown 0.14.5", + "hashlink 0.9.1", + "hex", + "indexmap 2.6.0", "log", "memchr", "native-tls", "once_cell", + "paste", "percent-encoding", "serde", "sha2 0.10.8", "smallvec", - "thiserror 2.0.11", + "sqlformat", + "thiserror", "tokio", "tokio-stream", "tracing", @@ -11169,22 +11034,22 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3112e2ad78643fef903618d78cf0aec1cb3134b019730edb039b69eaf531f310" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ "proc-macro2", "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "sqlx-macros-core" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9f90acc5ab146a99bf5061a7eb4976b573f560bc898ef3bf8435448dd5e7ad" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" dependencies = [ "dotenvy", "either", @@ -11198,7 +11063,7 @@ dependencies = [ "sha2 0.10.8", "sqlx-core", "sqlx-sqlite", - "syn 2.0.96", + "syn 2.0.90", "tempfile", "tokio", "url", @@ -11206,9 +11071,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f85ca71d3a5b24e64e1d08dd8fe36c6c95c339a896cc33068148906784620540" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" dependencies = [ "atoi", "flume", @@ -11229,9 +11094,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.51.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"19409f13998e55816d1c728395af0b52ec066206341d939e22e7766df9b494b8" +checksum = "43fce22ed1df64d04b262351c8f9d5c6da4f76f79f25ad15529792f893fad25d" dependencies = [ "Inflector", "num-format", @@ -11290,9 +11155,9 @@ dependencies = [ [[package]] name = "static_init_macro" -version = "1.0.4" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1389c88ddd739ec6d3f8f83343764a0e944cd23cfbf126a9796a714b0b6edd6f" +checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", @@ -11317,7 +11182,7 @@ dependencies = [ "sctp-proto", "serde", "sha-1", - "thiserror 1.0.69", + "thiserror", "tracing", ] @@ -11365,7 +11230,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -11422,11 +11287,11 @@ version = "0.17.0" source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "http-body-util", - "hyper 1.6.0", + "hyper 1.5.0", "hyper-util", "log", "prometheus", - "thiserror 1.0.69", + "thiserror", "tokio", ] @@ -11469,7 +11334,7 @@ dependencies = [ "quote", "rayon", "subtensor-linting", - "syn 2.0.96", + "syn 2.0.90", "walkdir", ] @@ -11507,7 +11372,7 @@ dependencies = [ "proc-macro2", "procedural-fork", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -11517,7 +11382,7 @@ dependencies = [ "ahash 0.8.11", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -11535,7 +11400,7 @@ dependencies = [ "pallet-evm-precompile-modexp", "pallet-evm-precompile-sha3fips", "pallet-evm-precompile-simple", - "pallet-proxy", + "pallet-proxy 38.0.0", "pallet-subtensor", "precompile-utils", "sp-core", @@ -11555,13 +11420,23 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "subtensor-swap-interface" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "scale-info", + "uuid", +] + [[package]] name = 
"subtensor-tools" version = "0.1.0" dependencies = [ "anyhow", "clap", - "semver 1.0.25", + "semver 1.0.23", "toml_edit", ] @@ -11590,9 +11465,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -11619,25 +11494,25 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags 2.8.0", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.6.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ "core-foundation-sys", "libc", @@ -11657,15 +11532,14 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.16.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", - "getrandom 0.3.1", "once_cell", - "rustix 0.38.44", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -11680,58 +11554,38 @@ 
dependencies = [ [[package]] name = "terminal_size" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ - "rustix 0.38.44", + "rustix 0.38.37", "windows-sys 0.59.0", ] [[package]] name = "termtree" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" - -[[package]] -name = "thiserror" -version = "1.0.69" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "2.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" -dependencies = [ - "thiserror-impl 2.0.11", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", + "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -11771,9 +11625,9 @@ dependencies = [ 
[[package]] name = "time" -version = "0.3.37" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -11792,9 +11646,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -11809,21 +11663,11 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -11851,8 +11695,8 @@ dependencies = [ "chacha20poly1305", "generic-array 0.14.7", "parity-scale-codec", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "scale-info", "serde", "serde_cbor", @@ -11864,9 +11708,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.43.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -11875,20 
+11719,20 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.8", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] @@ -11903,9 +11747,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -11930,9 +11774,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -11978,7 +11822,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -12006,9 +11850,9 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", "bytes", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", @@ -12030,9 +11874,9 @@ checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", @@ -12042,20 +11886,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -12093,9 +11937,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -12148,10 +11992,10 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "rand", + "rand 0.8.5", "smallvec", "socket2 0.4.10", - "thiserror 1.0.69", + "thiserror", "tinyvec", "tokio", "tracing", @@ -12174,9 +12018,9 @@ dependencies = [ "idna 0.4.0", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "smallvec", - "thiserror 1.0.69", + "thiserror", "tinyvec", "tokio", "tracing", @@ -12195,10 +12039,10 @@ dependencies = [ "lru-cache", 
"once_cell", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror 1.0.69", + "thiserror", "tokio", "tracing", "trust-dns-proto 0.23.2", @@ -12228,10 +12072,10 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.21.12", "sha1", - "thiserror 1.0.69", + "thiserror", "url", "utf-8", ] @@ -12244,7 +12088,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -12281,18 +12125,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "uint" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unarray" version = "0.1.4" @@ -12301,15 +12133,15 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.18" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" @@ -12326,18 +12158,18 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" -[[package]] -name = "unicode-width" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" - [[package]] name = "unicode-xid" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "universal-hash" version = "0.5.1" @@ -12384,12 +12216,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna 0.5.0", "percent-encoding", ] @@ -12399,18 +12231,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "utf8parse" version = "0.2.2" @@ -12423,14 +12243,14 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", ] [[package]] name = "valuable" -version = "0.1.1" +version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" @@ -12465,12 +12285,12 @@ dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand", - "rand_chacha", - "rand_core", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "sha2 0.10.8", "sha3", - "thiserror 1.0.69", + "thiserror", "zeroize", ] @@ -12510,57 +12330,56 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", "once_cell", - "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", + "once_cell", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" 
+checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", - "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12568,25 +12387,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-instrument" @@ -12608,7 +12424,7 @@ dependencies = [ "strum 0.24.1", "strum_macros 0.24.3", "tempfile", - "thiserror 1.0.69", + "thiserror", "wasm-opt-cxx-sys", "wasm-opt-sys", ] @@ -12735,7 +12551,7 @@ dependencies = [ "log", "object 0.30.4", "target-lexicon", - "thiserror 1.0.69", + "thiserror", "wasmparser", "wasmtime-cranelift-shared", "wasmtime-environ", @@ -12770,7 +12586,7 @@ dependencies = [ "object 0.30.4", "serde", "target-lexicon", - "thiserror 1.0.69", + "thiserror", "wasmparser", "wasmtime-types", ] @@ -12837,7 +12653,7 @@ dependencies = [ "memfd", "memoffset", "paste", - "rand", + "rand 0.8.5", "rustix 
0.36.17", "wasmtime-asm-macros", "wasmtime-environ", @@ -12853,15 +12669,15 @@ checksum = "a4f6fffd2a1011887d57f07654dd112791e872e3ff4a2e626aee8059ee17f06f" dependencies = [ "cranelift-entity", "serde", - "thiserror 1.0.69", + "thiserror", "wasmparser", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -12873,7 +12689,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.8", + "ring 0.17.13", "untrusted 0.9.0", ] @@ -12892,14 +12708,14 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.44", + "rustix 0.38.37", ] [[package]] name = "wide" -version = "0.7.32" +version = "0.7.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" +checksum = "b828f995bf1e9622031f8009f8481a85406ce1f4d4588ff746d872043e855690" dependencies = [ "bytemuck", "safe_arch", @@ -12944,38 +12760,28 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.53.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows-core 0.53.0", - "windows-targets 0.52.6", + "windows-core 0.51.1", + "windows-targets 0.48.5", ] [[package]] name = "windows-core" -version = "0.52.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] name = "windows-core" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" -dependencies = [ - "windows-result", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.1.2" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ "windows-targets 0.52.6", ] @@ -13211,9 +13017,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.25" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -13230,25 +13036,13 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "wyz" version = "0.5.1" @@ -13265,7 +13059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "serde", "zeroize", ] @@ -13283,7 +13077,7 @@ dependencies = [ "nom", "oid-registry 0.6.1", "rusticata-macros", - "thiserror 1.0.69", + "thiserror", "time", ] @@ -13300,7 +13094,7 @@ dependencies = [ "nom", "oid-registry 0.7.1", "rusticata-macros", - "thiserror 1.0.69", + "thiserror", "time", ] @@ -13312,14 +13106,14 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] name = "xml-rs" -version = "0.8.25" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" @@ -13341,7 +13135,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -13355,37 +13149,22 @@ dependencies = [ ] [[package]] -name = "yoke" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.5" +name = "zerocopy" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", - "synstructure 0.13.1", + "byteorder", + "zerocopy-derive 0.7.35", ] 
[[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.8.24", ] [[package]] @@ -13396,28 +13175,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", -] - -[[package]] -name = "zerofrom" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" -dependencies = [ - "zerofrom-derive", + "syn 2.0.90", ] [[package]] -name = "zerofrom-derive" -version = "0.1.5" +name = "zerocopy-derive" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", - "synstructure 0.13.1", + "syn 2.0.90", ] [[package]] @@ -13437,29 +13206,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml 
b/Cargo.toml index 20f3f03507..56aaaa8368 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,12 +51,13 @@ pallet-collective = { default-features = false, path = "pallets/collective" } pallet-commitments = { default-features = false, path = "pallets/commitments" } pallet-registry = { default-features = false, path = "pallets/registry" } pallet-subtensor = { default-features = false, path = "pallets/subtensor" } +pallet-subtensor-swap = { default-features = false, path = "pallets/swap" } safe-math = { default-features = false, path = "primitives/safe-math" } subtensor-custom-rpc = { default-features = false, path = "pallets/subtensor/rpc" } subtensor-custom-rpc-runtime-api = { default-features = false, path = "pallets/subtensor/runtime-api" } subtensor-precompiles = { default-features = false, path = "precompiles" } subtensor-runtime-common = { default-features = false, path = "common" } -pallet-subtensor-swap-interface = { default-features = false, path = "pallets/swap-interface" } +subtensor-swap-interface = { default-features = false, path = "pallets/swap-interface" } async-trait = "0.1" cargo-husky = { version = "1", default-features = false } @@ -72,6 +73,7 @@ futures = "0.3.30" hex = { version = "0.4", default-features = false } hex-literal = "0.4.1" jsonrpsee = { version = "0.24.4", default-features = false } +libsecp256k1 = { version = "0.7.2", default-features = false } log = { version = "0.4.21", default-features = false } memmap2 = "0.9.4" ndarray = { version = "0.15.6", default-features = false } @@ -122,7 +124,7 @@ pallet-insecure-randomness-collective-flip = { git = "https://github.com/parityt pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-proxy = { path = "pallets/proxy", default-features = false } pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } @@ -130,7 +132,8 @@ pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-utility = { path = "pallets/utility", default-features = false } +pallet-root-testing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } diff --git a/Dockerfile-localnet b/Dockerfile-localnet new file mode 100644 index 0000000000..0de11cb866 --- /dev/null +++ b/Dockerfile-localnet @@ -0,0 +1,67 @@ +ARG BASE_IMAGE=ubuntu:latest + +FROM $BASE_IMAGE AS builder +SHELL ["/bin/bash", "-c"] + +# Set noninteractive mode for 
apt-get +ARG DEBIAN_FRONTEND=noninteractive + +LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ + ai.opentensor.image.vendor="Opentensor Foundation" \ + ai.opentensor.image.title="opentensor/subtensor-localnet" \ + ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ + ai.opentensor.image.documentation="https://docs.bittensor.com" + +# Set up Rust environment +ENV RUST_BACKTRACE=1 + +RUN apt-get update +RUN apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + +# Copy entire repository +COPY . /build +WORKDIR /build + +# Install Rust +RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y +ENV PATH="/root/.cargo/bin:${PATH}" +RUN rustup toolchain install +RUN rustup target add wasm32-unknown-unknown + +## Build fast-blocks node +RUN ./scripts/localnet.sh --build-only +# Build non-fast-blocks +RUN ./scripts/localnet.sh False --build-only + +# Verify the binaries was produced +RUN test -e /build/target/fast-blocks/release/node-subtensor +RUN test -e /build/target/non-fast-blocks/release/node-subtensor + +FROM $BASE_IMAGE AS subtensor-localnet + +# Copy binaries +COPY --from=builder /build/target/fast-blocks/release/node-subtensor target/fast-blocks/release/node-subtensor +RUN chmod +x target/fast-blocks/release/node-subtensor + +COPY --from=builder /build/target/non-fast-blocks/release/node-subtensor target/non-fast-blocks/release/node-subtensor +RUN chmod +x target/non-fast-blocks/release/node-subtensor + +COPY --from=builder /build/snapshot.json /snapshot.json + +COPY --from=builder /build/scripts/localnet.sh scripts/localnet.sh +RUN chmod +x /scripts/localnet.sh + +## Ubdate certificates +RUN apt-get update && apt-get install -y ca-certificates + +# Do not build (just run) +ENV BUILD_BINARY=0 +# Switch to local run with IP 0.0.0.0 within docker image +ENV RUN_IN_DOCKER=1 +# Expose ports +EXPOSE 30334 30335 9944 9945 + +ENTRYPOINT ["/scripts/localnet.sh"] +# Fast blocks 
defaults to True, you can disable it by passing False to the docker command, e.g.: +# docker run ghcr.io/opentensor/subtensor-localnet False +CMD ["True"] diff --git a/evm-tests/.gitignore b/evm-tests/.gitignore new file mode 100644 index 0000000000..661f94a6e0 --- /dev/null +++ b/evm-tests/.gitignore @@ -0,0 +1,3 @@ +node_modules +.papi +.env diff --git a/evm-tests/README.md b/evm-tests/README.md new file mode 100644 index 0000000000..7d01034bd8 --- /dev/null +++ b/evm-tests/README.md @@ -0,0 +1,27 @@ +# type-test + +test with ts + +## polkadot api + +```bash +npx papi add devnet -w ws://10.0.0.11:9944 +``` + +## get the new metadata + +```bash +sh get-metadata.sh +``` + +## run all tests + +```bash +yarn run test +``` + +## To run a particular test case, you can pass an argument with the name or part of the name. For example: + +```bash +yarn run test -- -g "Can set subnet parameter" +``` diff --git a/evm-tests/get-metadata.sh b/evm-tests/get-metadata.sh new file mode 100644 index 0000000000..6d7727009d --- /dev/null +++ b/evm-tests/get-metadata.sh @@ -0,0 +1,3 @@ +rm -rf .papi +npx papi add devnet -w ws://localhost:9944 + diff --git a/evm-tests/local.test.ts b/evm-tests/local.test.ts new file mode 100644 index 0000000000..9eb24d4327 --- /dev/null +++ b/evm-tests/local.test.ts @@ -0,0 +1,53 @@ +import * as assert from "assert"; +import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { SUB_LOCAL_URL, } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" +import { ethers } from "ethers" +import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" +import { generateRandomEthersWallet } from "../src/utils" +import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister } from "../src/subtensor" + 
+describe("Test neuron precompile Serve Axon Prometheus", () => { + // init eth part + // const wallet1 = generateRandomEthersWallet(); + // const wallet2 = generateRandomEthersWallet(); + // const wallet3 = generateRandomEthersWallet(); + + // init substrate part + + // const coldkey = getRandomSubstrateKeypair(); + + let api: TypedApi + + // sudo account alice as signer + let alice: PolkadotSigner; + before(async () => { + // init variables got from await and async + const subClient = await getClient(SUB_LOCAL_URL) + api = await getDevnetApi() + // alice = await getAliceSigner(); + + // await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + // await forceSetBalanceToEthAddress(api, wallet1.address) + // await forceSetBalanceToEthAddress(api, wallet2.address) + // await forceSetBalanceToEthAddress(api, wallet3.address) + + + let index = 0; + while (index < 30) { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + } + + + }) + + it("Serve Axon", async () => { + + }); +}); \ No newline at end of file diff --git a/evm-tests/package.json b/evm-tests/package.json new file mode 100644 index 0000000000..a96a2c4a0c --- /dev/null +++ b/evm-tests/package.json @@ -0,0 +1,31 @@ +{ + "scripts": { + "test": "mocha --timeout 999999 --require ts-node/register test/*test.ts" + }, + "keywords": [], + "author": "", + "license": "ISC", + "dependencies": { + "@polkadot-api/descriptors": "file:.papi/descriptors", + "@polkadot-labs/hdkd": "^0.0.10", + "@polkadot-labs/hdkd-helpers": "^0.0.11", + "@polkadot/api": "15.1.1", + "crypto": "^1.0.1", + "dotenv": "16.4.7", + "ethers": "^6.13.5", + "polkadot-api": "^1.9.5", + "viem": "2.23.4" + }, + "devDependencies": { + "@types/bun": "^1.1.13", + 
"@types/chai": "^5.0.1", + "@types/mocha": "^10.0.10", + "assert": "^2.1.0", + "chai": "^5.2.0", + "mocha": "^11.1.0", + "prettier": "^3.3.3", + "ts-node": "^10.9.2", + "typescript": "^5.7.2", + "vite": "^5.4.8" + } +} diff --git a/evm-tests/src/address-utils.ts b/evm-tests/src/address-utils.ts new file mode 100644 index 0000000000..ed3abc5008 --- /dev/null +++ b/evm-tests/src/address-utils.ts @@ -0,0 +1,77 @@ +import { Address } from "viem" +import { encodeAddress } from "@polkadot/util-crypto"; +import { ss58Address } from "@polkadot-labs/hdkd-helpers"; +import { hexToU8a } from "@polkadot/util"; +import { blake2AsU8a, decodeAddress } from "@polkadot/util-crypto"; +import { Binary } from "polkadot-api"; +import { SS58_PREFIX } from "./config" + +export function toViemAddress(address: string): Address { + let addressNoPrefix = address.replace("0x", "") + return `0x${addressNoPrefix}` +} + +export function convertH160ToSS58(ethAddress: string) { + // get the public key + const hash = convertH160ToPublicKey(ethAddress); + + // Convert the hash to SS58 format + const ss58Address = encodeAddress(hash, SS58_PREFIX); + return ss58Address; +} + +export function convertPublicKeyToSs58(publickey: Uint8Array) { + return ss58Address(publickey, SS58_PREFIX); +} + +export function convertH160ToPublicKey(ethAddress: string) { + const prefix = "evm:"; + const prefixBytes = new TextEncoder().encode(prefix); + const addressBytes = hexToU8a( + ethAddress.startsWith("0x") ? 
ethAddress : `0x${ethAddress}` + ); + const combined = new Uint8Array(prefixBytes.length + addressBytes.length); + + // Concatenate prefix and Ethereum address + combined.set(prefixBytes); + combined.set(addressBytes, prefixBytes.length); + + // Hash the combined data (the public key) + const hash = blake2AsU8a(combined); + return hash; +} + +export function ss58ToEthAddress(ss58Address: string) { + // Decode the SS58 address to a Uint8Array public key + const publicKey = decodeAddress(ss58Address); + + // Take the first 20 bytes of the hashed public key for the Ethereum address + const ethereumAddressBytes = publicKey.slice(0, 20); + + // Convert the 20 bytes into an Ethereum H160 address format (Hex string) + const ethereumAddress = '0x' + Buffer.from(ethereumAddressBytes).toString('hex'); + + return ethereumAddress; +} + +export function ss58ToH160(ss58Address: string): Binary { + // Decode the SS58 address to a Uint8Array public key + const publicKey = decodeAddress(ss58Address); + + // Take the first 20 bytes of the hashed public key for the Ethereum address + const ethereumAddressBytes = publicKey.slice(0, 20); + + + return new Binary(ethereumAddressBytes); +} + +export function ethAddressToH160(ethAddress: string): Binary { + // Decode the SS58 address to a Uint8Array public key + const publicKey = hexToU8a(ethAddress); + + // Take the first 20 bytes of the hashed public key for the Ethereum address + // const ethereumAddressBytes = publicKey.slice(0, 20); + + + return new Binary(publicKey); +} \ No newline at end of file diff --git a/evm-tests/src/balance-math.ts b/evm-tests/src/balance-math.ts new file mode 100644 index 0000000000..8d6e86bd5a --- /dev/null +++ b/evm-tests/src/balance-math.ts @@ -0,0 +1,26 @@ +import assert from "assert" + +export const TAO = BigInt(1000000000) // 10^9 +export const ETH_PER_RAO = BigInt(1000000000) // 10^9 +export const GWEI = BigInt(1000000000) // 10^9 +export const MAX_TX_FEE = BigInt(21000000) * GWEI // 100 times EVM to 
EVM transfer fee + +export function bigintToRao(value: bigint) { + return TAO * value +} + +export function tao(value: number) { + return TAO * BigInt(value) +} + +export function raoToEth(value: bigint) { + return ETH_PER_RAO * value +} + +export function compareEthBalanceWithTxFee(balance1: bigint, balance2: bigint) { + if (balance1 > balance2) { + assert((balance1 - balance2) < MAX_TX_FEE) + } else { + assert((balance2 - balance1) < MAX_TX_FEE) + } +} diff --git a/evm-tests/src/config.ts b/evm-tests/src/config.ts new file mode 100644 index 0000000000..00b942f802 --- /dev/null +++ b/evm-tests/src/config.ts @@ -0,0 +1,38 @@ +export const ETH_LOCAL_URL = 'http://localhost:9944' +export const SUB_LOCAL_URL = 'ws://localhost:9944' +export const SS58_PREFIX = 42; +// set the tx timeout as 2 second when eable the fast-blocks feature. +export const TX_TIMEOUT = 3000; + +export const IED25519VERIFY_ADDRESS = "0x0000000000000000000000000000000000000402"; +export const IEd25519VerifyABI = [ + { + inputs: [ + { internalType: "bytes32", name: "message", type: "bytes32" }, + { internalType: "bytes32", name: "publicKey", type: "bytes32" }, + { internalType: "bytes32", name: "r", type: "bytes32" }, + { internalType: "bytes32", name: "s", type: "bytes32" }, + ], + name: "verify", + outputs: [{ internalType: "bool", name: "", type: "bool" }], + stateMutability: "pure", + type: "function", + }, +]; + +export const IBALANCETRANSFER_ADDRESS = "0x0000000000000000000000000000000000000800"; +export const IBalanceTransferABI = [ + { + inputs: [ + { + internalType: "bytes32", + name: "data", + type: "bytes32", + }, + ], + name: "transfer", + outputs: [], + stateMutability: "payable", + type: "function", + }, +]; \ No newline at end of file diff --git a/evm-tests/src/contracts/bridgeToken.ts b/evm-tests/src/contracts/bridgeToken.ts new file mode 100644 index 0000000000..f8b3ea4d03 --- /dev/null +++ b/evm-tests/src/contracts/bridgeToken.ts @@ -0,0 +1,631 @@ +export const 
BRIDGE_TOKEN_CONTRACT_ABI = [ + { + "inputs": [ + { + "internalType": "string", + "name": "name_", + "type": "string" + }, + { + "internalType": "string", + "name": "symbol_", + "type": "string" + }, + { + "internalType": "address", + "name": "admin", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "AccessControlBadConfirmation", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "neededRole", + "type": "bytes32" + } + ], + "name": "AccessControlUnauthorizedAccount", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "allowance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientAllowance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientBalance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "approver", + "type": "address" + } + ], + "name": "ERC20InvalidApprover", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + } + ], + "name": "ERC20InvalidReceiver", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "ERC20InvalidSender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "ERC20InvalidSpender", + "type": "error" + }, + { + "inputs": [], + "name": 
"UnauthorizedHandler", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "previousAdminRole", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "newAdminRole", + "type": "bytes32" + } + ], + "name": "RoleAdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleGranted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleRevoked", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [], + "name": 
"DEFAULT_ADMIN_ROLE", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "burnFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + } + ], + "name": "getRoleAdmin", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], 
+ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "grantRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "hasRole", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "isAdmin", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "mint", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "callerConfirmation", + "type": "address" + } + ], + "name": "renounceRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "revokeRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": 
"bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +]; + +export const BRIDGE_TOKEN_CONTRACT_BYTECODE = 
"0x60806040523480156200001157600080fd5b5060405162000fac38038062000fac8339810160408190526200003491620001ea565b8282600362000044838262000308565b50600462000053828262000308565b5062000065915060009050826200006f565b50505050620003d4565b60008281526005602090815260408083206001600160a01b038516845290915281205460ff16620001185760008381526005602090815260408083206001600160a01b03861684529091529020805460ff19166001179055620000cf3390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45060016200011c565b5060005b92915050565b634e487b7160e01b600052604160045260246000fd5b600082601f8301126200014a57600080fd5b81516001600160401b038082111562000167576200016762000122565b604051601f8301601f19908116603f0116810190828211818310171562000192576200019262000122565b8160405283815260209250866020858801011115620001b057600080fd5b600091505b83821015620001d45785820183015181830184015290820190620001b5565b6000602085830101528094505050505092915050565b6000806000606084860312156200020057600080fd5b83516001600160401b03808211156200021857600080fd5b620002268783880162000138565b945060208601519150808211156200023d57600080fd5b506200024c8682870162000138565b604086015190935090506001600160a01b03811681146200026c57600080fd5b809150509250925092565b600181811c908216806200028c57607f821691505b602082108103620002ad57634e487b7160e01b600052602260045260246000fd5b50919050565b601f82111562000303576000816000526020600020601f850160051c81016020861015620002de5750805b601f850160051c820191505b81811015620002ff57828155600101620002ea565b5050505b505050565b81516001600160401b0381111562000324576200032462000122565b6200033c8162000335845462000277565b84620002b3565b602080601f8311600181146200037457600084156200035b5750858301515b600019600386901b1c1916600185901b178555620002ff565b600085815260208120601f198616915b82811015620003a55788860151825594840194600190910190840162000384565b5085821015620003c45787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b610bc880620003e46000396000f3fe60806
0405234801561001057600080fd5b506004361061012c5760003560e01c806340c10f19116100ad57806395d89b411161007157806395d89b4114610288578063a217fddf14610290578063a9059cbb14610298578063d547741f146102ab578063dd62ed3e146102be57600080fd5b806340c10f191461021357806342966c681461022657806370a082311461023957806379cc67901461026257806391d148541461027557600080fd5b8063248a9ca3116100f4578063248a9ca3146101a657806324d7806c146101c95780632f2ff15d146101dc578063313ce567146101f157806336568abe1461020057600080fd5b806301ffc9a71461013157806306fdde0314610159578063095ea7b31461016e57806318160ddd1461018157806323b872dd14610193575b600080fd5b61014461013f3660046109ab565b6102f7565b60405190151581526020015b60405180910390f35b61016161032e565b60405161015091906109dc565b61014461017c366004610a47565b6103c0565b6002545b604051908152602001610150565b6101446101a1366004610a71565b6103d8565b6101856101b4366004610aad565b60009081526005602052604090206001015490565b6101446101d7366004610ac6565b6103fc565b6101ef6101ea366004610ae1565b610408565b005b60405160128152602001610150565b6101ef61020e366004610ae1565b610433565b6101ef610221366004610a47565b61046b565b6101ef610234366004610aad565b610480565b610185610247366004610ac6565b6001600160a01b031660009081526020819052604090205490565b6101ef610270366004610a47565b61048d565b610144610283366004610ae1565b6104a2565b6101616104cd565b610185600081565b6101446102a6366004610a47565b6104dc565b6101ef6102b9366004610ae1565b6104ea565b6101856102cc366004610b0d565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b60006001600160e01b03198216637965db0b60e01b148061032857506301ffc9a760e01b6001600160e01b03198316145b92915050565b60606003805461033d90610b37565b80601f016020809104026020016040519081016040528092919081815260200182805461036990610b37565b80156103b65780601f1061038b576101008083540402835291602001916103b6565b820191906000526020600020905b81548152906001019060200180831161039957829003601f168201915b5050505050905090565b6000336103ce81858561050f565b5060019392505050565b6000336103e685828561051c565b6103f18
58585610599565b506001949350505050565b600061032881836104a2565b600082815260056020526040902060010154610423816105f8565b61042d8383610602565b50505050565b6001600160a01b038116331461045c5760405163334bd91960e11b815260040160405180910390fd5b6104668282610696565b505050565b6000610476816105f8565b6104668383610703565b61048a338261073d565b50565b6000610498816105f8565b610466838361073d565b60009182526005602090815260408084206001600160a01b0393909316845291905290205460ff1690565b60606004805461033d90610b37565b6000336103ce818585610599565b600082815260056020526040902060010154610505816105f8565b61042d8383610696565b6104668383836001610773565b6001600160a01b03838116600090815260016020908152604080832093861683529290522054600019811461042d578181101561058a57604051637dc7a0d960e11b81526001600160a01b038416600482015260248101829052604481018390526064015b60405180910390fd5b61042d84848484036000610773565b6001600160a01b0383166105c357604051634b637e8f60e11b815260006004820152602401610581565b6001600160a01b0382166105ed5760405163ec442f0560e01b815260006004820152602401610581565b610466838383610848565b61048a8133610972565b600061060e83836104a2565b61068e5760008381526005602090815260408083206001600160a01b03861684529091529020805460ff191660011790556106463390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a4506001610328565b506000610328565b60006106a283836104a2565b1561068e5760008381526005602090815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a4506001610328565b6001600160a01b03821661072d5760405163ec442f0560e01b815260006004820152602401610581565b61073960008383610848565b5050565b6001600160a01b03821661076757604051634b637e8f60e11b815260006004820152602401610581565b61073982600083610848565b6001600160a01b03841661079d5760405163e602df0560e01b815260006004820152602401610581565b6001600160a01b0383166107c757604051634a1406b160e11b815260006004820152602401610581565b6001600160a01b0380851
66000908152600160209081526040808320938716835292905220829055801561042d57826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9258460405161083a91815260200190565b60405180910390a350505050565b6001600160a01b0383166108735780600260008282546108689190610b71565b909155506108e59050565b6001600160a01b038316600090815260208190526040902054818110156108c65760405163391434e360e21b81526001600160a01b03851660048201526024810182905260448101839052606401610581565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b03821661090157600280548290039055610920565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161096591815260200190565b60405180910390a3505050565b61097c82826104a2565b6107395760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610581565b6000602082840312156109bd57600080fd5b81356001600160e01b0319811681146109d557600080fd5b9392505050565b60006020808352835180602085015260005b81811015610a0a578581018301518582016040015282016109ee565b506000604082860101526040601f19601f8301168501019250505092915050565b80356001600160a01b0381168114610a4257600080fd5b919050565b60008060408385031215610a5a57600080fd5b610a6383610a2b565b946020939093013593505050565b600080600060608486031215610a8657600080fd5b610a8f84610a2b565b9250610a9d60208501610a2b565b9150604084013590509250925092565b600060208284031215610abf57600080fd5b5035919050565b600060208284031215610ad857600080fd5b6109d582610a2b565b60008060408385031215610af457600080fd5b82359150610b0460208401610a2b565b90509250929050565b60008060408385031215610b2057600080fd5b610b2983610a2b565b9150610b0460208401610a2b565b600181811c90821680610b4b57607f821691505b602082108103610b6b57634e487b7160e01b600052602260045260246000fd5b50919050565b8082018082111561032857634e487b7160e01b600052601160045260246000fdfea2646970667358221220e179fc58c926e64cb6e87416f8ca64c117044e3195b184afe4503885760
6c15364736f6c63430008160033" diff --git a/evm-tests/src/contracts/incremental.sol b/evm-tests/src/contracts/incremental.sol new file mode 100644 index 0000000000..2b3bc2fd49 --- /dev/null +++ b/evm-tests/src/contracts/incremental.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity >=0.8.2 <0.9.0; + +contract Storage { + uint256 number; + + /** + * @dev Store value in variable + * @param num value to store + */ + function store(uint256 num) public { + number = num; + } + + /** + * @dev Return value + * @return value of 'number' + */ + function retrieve() public view returns (uint256) { + return number; + } +} diff --git a/evm-tests/src/contracts/incremental.ts b/evm-tests/src/contracts/incremental.ts new file mode 100644 index 0000000000..b19909e491 --- /dev/null +++ b/evm-tests/src/contracts/incremental.ts @@ -0,0 +1,39 @@ +export const INCREMENTAL_CONTRACT_ABI = [ + { + "inputs": [], + "name": "retrieve", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "store", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +]; + +/* +"compiler": { + "version": "0.8.26+commit.8a97fa7a" + }, +*/ + +export const INCREMENTAL_CONTRACT_BYTECODE = 
"6080604052348015600e575f80fd5b506101438061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610034575f3560e01c80632e64cec1146100385780636057361d14610056575b5f80fd5b610040610072565b60405161004d919061009b565b60405180910390f35b610070600480360381019061006b91906100e2565b61007a565b005b5f8054905090565b805f8190555050565b5f819050919050565b61009581610083565b82525050565b5f6020820190506100ae5f83018461008c565b92915050565b5f80fd5b6100c181610083565b81146100cb575f80fd5b50565b5f813590506100dc816100b8565b92915050565b5f602082840312156100f7576100f66100b4565b5b5f610104848285016100ce565b9150509291505056fea26469706673582212209a0dd35336aff1eb3eeb11db76aa60a1427a12c1b92f945ea8c8d1dfa337cf2264736f6c634300081a0033" + + + diff --git a/evm-tests/src/contracts/metagraph.ts b/evm-tests/src/contracts/metagraph.ts new file mode 100644 index 0000000000..d0c3bf5154 --- /dev/null +++ b/evm-tests/src/contracts/metagraph.ts @@ -0,0 +1,391 @@ +export const IMETAGRAPH_ADDRESS = "0x0000000000000000000000000000000000000802"; + +export const IMetagraphABI = [ + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getAxon", + outputs: [ + { + components: [ + { + internalType: "uint64", + name: "block", + type: "uint64", + }, + { + internalType: "uint32", + name: "version", + type: "uint32", + }, + { + internalType: "uint128", + name: "ip", + type: "uint128", + }, + { + internalType: "uint16", + name: "port", + type: "uint16", + }, + { + internalType: "uint8", + name: "ip_type", + type: "uint8", + }, + { + internalType: "uint8", + name: "protocol", + type: "uint8", + }, + ], + internalType: "struct AxonInfo", + name: "", + type: "tuple", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getColdkey", + outputs: [ 
+ { + internalType: "bytes32", + name: "", + type: "bytes32", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getConsensus", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getDividends", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getEmission", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getHotkey", + outputs: [ + { + internalType: "bytes32", + name: "", + type: "bytes32", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getIncentive", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getIsActive", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: 
"view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getLastUpdate", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getRank", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getStake", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getTrust", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getUidCount", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", + type: "uint16", + }, + ], + name: "getValidatorStatus", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "uid", 
+ type: "uint16", + }, + ], + name: "getVtrust", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, +]; \ No newline at end of file diff --git a/evm-tests/src/contracts/neuron.ts b/evm-tests/src/contracts/neuron.ts new file mode 100644 index 0000000000..4a8fb47e4c --- /dev/null +++ b/evm-tests/src/contracts/neuron.ts @@ -0,0 +1,235 @@ +export const INEURON_ADDRESS = "0x0000000000000000000000000000000000000804"; + +export const INeuronABI = [ + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bytes32", + name: "commitHash", + type: "bytes32", + }, + ], + name: "commitWeights", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16[]", + name: "uids", + type: "uint16[]", + }, + { + internalType: "uint16[]", + name: "values", + type: "uint16[]", + }, + { + internalType: "uint16[]", + name: "salt", + type: "uint16[]", + }, + { + internalType: "uint64", + name: "versionKey", + type: "uint64", + }, + ], + name: "revealWeights", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16[]", + name: "dests", + type: "uint16[]", + }, + { + internalType: "uint16[]", + name: "weights", + type: "uint16[]", + }, + { + internalType: "uint64", + name: "versionKey", + type: "uint64", + }, + ], + name: "setWeights", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint32", + name: "version", + type: "uint32", + }, + { + internalType: "uint128", + name: "ip", + type: "uint128", + }, + { + internalType: "uint16", + name: "port", + type: "uint16", + }, + { + 
internalType: "uint8", + name: "ipType", + type: "uint8", + }, + { + internalType: "uint8", + name: "protocol", + type: "uint8", + }, + { + internalType: "uint8", + name: "placeholder1", + type: "uint8", + }, + { + internalType: "uint8", + name: "placeholder2", + type: "uint8", + }, + ], + name: "serveAxon", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint32", + name: "version", + type: "uint32", + }, + { + internalType: "uint128", + name: "ip", + type: "uint128", + }, + { + internalType: "uint16", + name: "port", + type: "uint16", + }, + { + internalType: "uint8", + name: "ipType", + type: "uint8", + }, + { + internalType: "uint8", + name: "protocol", + type: "uint8", + }, + { + internalType: "uint8", + name: "placeholder1", + type: "uint8", + }, + { + internalType: "uint8", + name: "placeholder2", + type: "uint8", + }, + { + internalType: "bytes", + name: "certificate", + type: "bytes", + }, + ], + name: "serveAxonTls", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint32", + name: "version", + type: "uint32", + }, + { + internalType: "uint128", + name: "ip", + type: "uint128", + }, + { + internalType: "uint16", + name: "port", + type: "uint16", + }, + { + internalType: "uint8", + name: "ipType", + type: "uint8", + }, + ], + name: "servePrometheus", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32", + }, + ], + name: "burnedRegister", + outputs: [], + stateMutability: "payable", + type: "function", + }, +]; \ No newline at end of file diff --git a/evm-tests/src/contracts/staking.ts b/evm-tests/src/contracts/staking.ts new file mode 
100644 index 0000000000..af4422ca96 --- /dev/null +++ b/evm-tests/src/contracts/staking.ts @@ -0,0 +1,291 @@ +export const ISTAKING_ADDRESS = "0x0000000000000000000000000000000000000801"; +export const ISTAKING_V2_ADDRESS = "0x0000000000000000000000000000000000000805"; + +export const IStakingABI = [ + { + inputs: [ + { + internalType: "bytes32", + name: "delegate", + type: "bytes32", + }, + ], + name: "addProxy", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32", + }, + { + internalType: "uint256", + name: "netuid", + type: "uint256", + }, + ], + name: "addStake", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "delegate", + type: "bytes32", + }, + ], + name: "removeProxy", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32", + }, + { + internalType: "bytes32", + name: "coldkey", + type: "bytes32", + }, + { + internalType: "uint256", + name: "netuid", + type: "uint256", + }, + ], + name: "getStake", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32", + }, + { + internalType: "uint256", + name: "amount", + type: "uint256", + }, + { + internalType: "uint256", + name: "netuid", + type: "uint256", + }, + ], + name: "removeStake", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, +]; + +export const IStakingV2ABI = [ + { + "inputs": [ + { + "internalType": "bytes32", + "name": "delegate", + "type": "bytes32" + } + ], + "name": "addProxy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": 
"bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "addStake", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "getAlphaStakedValidators", + "outputs": [ + { + "internalType": "uint256[]", + "name": "", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "coldkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "getStake", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "getTotalAlphaStaked", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "coldkey", + "type": "bytes32" + } + ], + "name": "getTotalColdkeyStake", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + } + ], + "name": "getTotalHotkeyStake", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + 
{ + "internalType": "bytes32", + "name": "delegate", + "type": "bytes32" + } + ], + "name": "removeProxy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "removeStake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +]; \ No newline at end of file diff --git a/evm-tests/src/contracts/subnet.ts b/evm-tests/src/contracts/subnet.ts new file mode 100644 index 0000000000..9b6fe00596 --- /dev/null +++ b/evm-tests/src/contracts/subnet.ts @@ -0,0 +1,889 @@ +export const ISUBNET_ADDRESS = "0x0000000000000000000000000000000000000803"; + +export const ISubnetABI = [ + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getAdjustmentAlpha", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getAlphaValues", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getBondsMovingAverage", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getCommitRevealWeightsEnabled", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + 
inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getDifficulty", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + name: "getImmunityPeriod", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + name: "getKappa", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMaxBurn", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMaxDifficulty", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMaxWeightLimit", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMinAllowedWeights", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMinBurn", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", 
+ }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getMinDifficulty", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getNetworkRegistrationAllowed", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + name: "getRho", + outputs: [ + { + internalType: "uint16", + name: "", + type: "uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getServingRateLimit", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getWeightsSetRateLimit", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getWeightsVersionKey", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "activityCutoff", + type: "uint16", + }, + ], + name: "setActivityCutoff", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getActivityCutoff", + outputs: [ + { + internalType: "uint16", + name: "", + type: 
"uint16", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "adjustmentAlpha", + type: "uint64", + }, + ], + name: "setAdjustmentAlpha", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "alphaLow", + type: "uint16", + }, + { + internalType: "uint16", + name: "alphaHigh", + type: "uint16", + }, + ], + name: "setAlphaValues", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "bondsMovingAverage", + type: "uint64", + }, + ], + name: "setBondsMovingAverage", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "commitRevealWeightsEnabled", + type: "bool", + }, + ], + name: "setCommitRevealWeightsEnabled", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getCommitRevealWeightsInterval", + outputs: [ + { + internalType: "uint64", + name: "", + type: "uint64", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "commitRevealWeightsInterval", + type: "uint64", + }, + ], + name: "setCommitRevealWeightsInterval", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "difficulty", + type: "uint64", + }, + ], + name: 
"setDifficulty", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "immunityPeriod", + type: "uint16", + }, + ], + name: "setImmunityPeriod", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "kappa", + type: "uint16", + }, + ], + name: "setKappa", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getLiquidAlphaEnabled", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "liquidAlphaEnabled", + type: "bool", + }, + ], + name: "setLiquidAlphaEnabled", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "maxBurn", + type: "uint64", + }, + ], + name: "setMaxBurn", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "maxDifficulty", + type: "uint64", + }, + ], + name: "setMaxDifficulty", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "maxWeightLimit", + type: "uint16", + }, + ], + name: "setMaxWeightLimit", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + 
type: "uint16", + }, + { + internalType: "uint16", + name: "minAllowedWeights", + type: "uint16", + }, + ], + name: "setMinAllowedWeights", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "minBurn", + type: "uint64", + }, + ], + name: "setMinBurn", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "minDifficulty", + type: "uint64", + }, + ], + name: "setMinDifficulty", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + ], + name: "getNetworkPowRegistrationAllowed", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "networkPowRegistrationAllowed", + type: "bool", + }, + ], + name: "setNetworkPowRegistrationAllowed", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "bool", + name: "networkRegistrationAllowed", + type: "bool", + }, + ], + name: "setNetworkRegistrationAllowed", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint16", + name: "rho", + type: "uint16", + }, + ], + name: "setRho", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "servingRateLimit", + type: "uint64", + 
}, + ], + name: "setServingRateLimit", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "weightsSetRateLimit", + type: "uint64", + }, + ], + name: "setWeightsSetRateLimit", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "uint16", + name: "netuid", + type: "uint16", + }, + { + internalType: "uint64", + name: "weightsVersionKey", + type: "uint64", + }, + ], + name: "setWeightsVersionKey", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32", + }, + ], + name: "registerNetwork", + outputs: [], + stateMutability: "payable", + type: "function", + }, + { + inputs: [ + { + internalType: "bytes32", + name: "hotkey", + type: "bytes32" + }, + { + internalType: "string", + name: "subnetName", + type: "string" + }, + { + internalType: "string", + name: "githubRepo", + type: "string" + }, + { + internalType: "string", + name: "subnetContact", + type: "string" + }, + { + internalType: "string", + name: "subnetUrl", + type: "string" + }, + { + internalType: "string", + name: "discord", + type: "string" + }, + { + internalType: "string", + name: "description", + type: "string" + }, + { + internalType: "string", + name: "additional", + type: "string" + } + ], + name: "registerNetwork", + outputs: [], + stateMutability: "payable", + type: "function" + }, +]; \ No newline at end of file diff --git a/evm-tests/src/contracts/withdraw.sol b/evm-tests/src/contracts/withdraw.sol new file mode 100644 index 0000000000..3945661e09 --- /dev/null +++ b/evm-tests/src/contracts/withdraw.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract Withdraw { + constructor() {} + + function withdraw(uint256 value) public payable { + 
payable(msg.sender).transfer(value); + } + + receive() external payable {} +} diff --git a/evm-tests/src/contracts/withdraw.ts b/evm-tests/src/contracts/withdraw.ts new file mode 100644 index 0000000000..46fe66bf24 --- /dev/null +++ b/evm-tests/src/contracts/withdraw.ts @@ -0,0 +1,31 @@ +export const WITHDRAW_CONTRACT_ABI = [ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "withdraw", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" + } +]; + +// "compiler": { +// "version": "0.8.26+commit.8a97fa7a" +// }, + +export const WITHDRAW_CONTRACT_BYTECODE = "6080604052348015600e575f80fd5b506101148061001c5f395ff3fe608060405260043610601e575f3560e01c80632e1a7d4d146028576024565b36602457005b5f80fd5b603e6004803603810190603a919060b8565b6040565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc8290811502906040515f60405180830381858888f193505050501580156082573d5f803e3d5ffd5b5050565b5f80fd5b5f819050919050565b609a81608a565b811460a3575f80fd5b50565b5f8135905060b2816093565b92915050565b5f6020828403121560ca5760c96086565b5b5f60d58482850160a6565b9150509291505056fea2646970667358221220f43400858bfe4fcc0bf3c1e2e06d3a9e6ced86454a00bd7e4866b3d4d64e46bb64736f6c634300081a0033" + diff --git a/evm-tests/src/eth.ts b/evm-tests/src/eth.ts new file mode 100644 index 0000000000..ea3ebb9976 --- /dev/null +++ b/evm-tests/src/eth.ts @@ -0,0 +1,17 @@ + +import { ethers, Provider, TransactionRequest, Wallet } from "ethers"; +export async function estimateTransactionCost(provider: Provider, tx: TransactionRequest) { + const feeData = await provider.getFeeData(); + const estimatedGas = BigInt(await provider.estimateGas(tx)); + const gasPrice = feeData.gasPrice || feeData.maxFeePerGas; + if (gasPrice === null) + return estimatedGas + else + return estimatedGas * BigInt(gasPrice); +} 
+ +export function getContract(contractAddress: string, abi: {}[], wallet: Wallet) { + const contract = new ethers.Contract(contractAddress, abi, wallet); + return contract + +} \ No newline at end of file diff --git a/evm-tests/src/substrate.ts b/evm-tests/src/substrate.ts new file mode 100644 index 0000000000..ddfdfb626d --- /dev/null +++ b/evm-tests/src/substrate.ts @@ -0,0 +1,274 @@ +import * as assert from "assert"; +import { devnet, MultiAddress } from '@polkadot-api/descriptors'; +import { createClient, TypedApi, Transaction, PolkadotSigner, Binary } from 'polkadot-api'; +import { getWsProvider } from 'polkadot-api/ws-provider/web'; +import { sr25519CreateDerive } from "@polkadot-labs/hdkd" +import { convertPublicKeyToSs58 } from "../src/address-utils" +import { DEV_PHRASE, entropyToMiniSecret, mnemonicToEntropy, KeyPair } from "@polkadot-labs/hdkd-helpers" +import { getPolkadotSigner } from "polkadot-api/signer" +import { randomBytes } from 'crypto'; +import { Keyring } from '@polkadot/keyring'; +import { SS58_PREFIX, TX_TIMEOUT } from "./config"; + +let api: TypedApi | undefined = undefined + +// define url string as type to extend in the future +// export type ClientUrlType = 'ws://localhost:9944' | 'wss://test.finney.opentensor.ai:443' | 'wss://dev.chain.opentensor.ai:443' | 'wss://archive.chain.opentensor.ai'; +export type ClientUrlType = 'ws://localhost:9944' + +export async function getClient(url: ClientUrlType) { + const provider = getWsProvider(url); + const client = createClient(provider); + return client +} + +export async function getDevnetApi() { + if (api === undefined) { + let client = await getClient('ws://localhost:9944') + api = client.getTypedApi(devnet) + } + return api +} + +export function getAlice() { + const entropy = mnemonicToEntropy(DEV_PHRASE) + const miniSecret = entropyToMiniSecret(entropy) + const derive = sr25519CreateDerive(miniSecret) + const hdkdKeyPair = derive("//Alice") + + return hdkdKeyPair +} + +export function 
getAliceSigner() { + const alice = getAlice() + const polkadotSigner = getPolkadotSigner( + alice.publicKey, + "Sr25519", + alice.sign, + ) + + return polkadotSigner +} + +export function getRandomSubstrateSigner() { + const keypair = getRandomSubstrateKeypair(); + return getSignerFromKeypair(keypair) +} + +export function getSignerFromKeypair(keypair: KeyPair) { + const polkadotSigner = getPolkadotSigner( + keypair.publicKey, + "Sr25519", + keypair.sign, + ) + return polkadotSigner +} + +export function getRandomSubstrateKeypair() { + const seed = randomBytes(32); + const miniSecret = entropyToMiniSecret(seed) + const derive = sr25519CreateDerive(miniSecret) + const hdkdKeyPair = derive("") + + return hdkdKeyPair +} + +export async function getBalance(api: TypedApi) { + const value = await api.query.Balances.Account.getValue("") + return value +} + +export async function getNonce(api: TypedApi, ss58Address: string): Promise { + const value = await api.query.System.Account.getValue(ss58Address); + return value.nonce +} + +export async function getNonceChangePromise(api: TypedApi, ss58Address: string) { + // api.query.System.Account.getValue() + const initValue = await api.query.System.Account.getValue(ss58Address); + return new Promise((resolve, reject) => { + const subscription = api.query.System.Account.watchValue(ss58Address).subscribe({ + next(value) { + if (value.nonce > initValue.nonce) { + subscription.unsubscribe(); + // Resolve the promise when the transaction is finalized + resolve(); + } + }, + + error(err: Error) { + console.error("Transaction failed:", err); + subscription.unsubscribe(); + // Reject the promise in case of an error + reject(err); + }, + complete() { + console.log("Subscription complete"); + } + }) + + setTimeout(() => { + subscription.unsubscribe(); + console.log('unsubscribed!'); + resolve() + }, TX_TIMEOUT); + + }) +} + +export function convertPublicKeyToMultiAddress(publicKey: Uint8Array, ss58Format: number = SS58_PREFIX): 
MultiAddress { + // Create a keyring instance + const keyring = new Keyring({ type: 'sr25519', ss58Format }); + + // Add the public key to the keyring + const address = keyring.encodeAddress(publicKey); + + return MultiAddress.Id(address); +} + + +export async function waitForTransactionCompletion(api: TypedApi, tx: Transaction<{}, string, string, void>, signer: PolkadotSigner,) { + const transactionPromise = await getTransactionWatchPromise(tx, signer) + return transactionPromise + + // If we can't always get the finalized event, then add nonce subscribe as other evidence for tx is finalized. + // Don't need it based on current testing. + // const ss58Address = convertPublicKeyToSs58(signer.publicKey) + // const noncePromise = await getNonceChangePromise(api, ss58Address) + + // return new Promise((resolve, reject) => { + // Promise.race([transactionPromise, noncePromise]) + // .then(resolve) + // .catch(reject); + // }) +} + +export async function getTransactionWatchPromise(tx: Transaction<{}, string, string, void>, signer: PolkadotSigner,) { + return new Promise((resolve, reject) => { + // store the txHash, then use it in timeout. 
easier to know which tx is not finalized in time + let txHash = "" + const subscription = tx.signSubmitAndWatch(signer).subscribe({ + next(value) { + console.log("Event:", value); + txHash = value.txHash + + // TODO investigate why finalized not for each extrinsic + if (value.type === "finalized") { + console.log("Transaction is finalized in block:", value.txHash); + subscription.unsubscribe(); + // Resolve the promise when the transaction is finalized + resolve(); + + } + }, + error(err) { + console.error("Transaction failed:", err); + subscription.unsubscribe(); + // Reject the promise in case of an error + reject(err); + + }, + complete() { + console.log("Subscription complete"); + } + }); + + setTimeout(() => { + subscription.unsubscribe(); + console.log('unsubscribed because of timeout for tx {}', txHash); + reject() + }, TX_TIMEOUT); + }); +} + +export async function waitForFinalizedBlock(api: TypedApi) { + const currentBlockNumber = await api.query.System.Number.getValue() + return new Promise((resolve, reject) => { + + const subscription = api.query.System.Number.watchValue().subscribe({ + // TODO check why the block number event just get once + next(value: number) { + console.log("Event block number is :", value); + + if (value > currentBlockNumber + 6) { + console.log("Transaction is finalized in block:", value); + subscription.unsubscribe(); + + resolve(); + + } + + }, + error(err: Error) { + console.error("Transaction failed:", err); + subscription.unsubscribe(); + // Reject the promise in case of an error + reject(err); + + }, + complete() { + console.log("Subscription complete"); + } + }); + + setTimeout(() => { + subscription.unsubscribe(); + console.log('unsubscribed!'); + resolve() + }, 2000); + }); +} + +// second solution to wait for transaction finalization. 
pass the raw data to avoid the complex transaction type definition +export async function waitForTransactionCompletion2(api: TypedApi, raw: Binary, signer: PolkadotSigner,) { + const tx = await api.txFromCallData(raw); + return new Promise((resolve, reject) => { + const subscription = tx.signSubmitAndWatch(signer).subscribe({ + next(value) { + console.log("Event:", value); + + if (value.type === "txBestBlocksState") { + console.log("Transaction is finalized in block:", value.txHash); + subscription.unsubscribe(); + // Resolve the promise when the transaction is finalized + resolve(); + + } + }, + error(err: Error) { + console.error("Transaction failed:", err); + subscription.unsubscribe(); + // Reject the promise in case of an error + reject(err); + + }, + complete() { + console.log("Subscription complete"); + } + }); + }); +} + +export async function waitForNonceChange(api: TypedApi, ss58Address: string) { + const initNonce = await getNonce(api, ss58Address) + while (true) { + const currentNonce = await getNonce(api, ss58Address) + if (currentNonce > initNonce) { + break + } + + await new Promise(resolve => setTimeout(resolve, 200)); + } +} + + +// other approach to convert public key to ss58 +// export function convertPublicKeyToSs58(publicKey: Uint8Array, ss58Format: number = 42): string { +// // Create a keyring instance +// const keyring = new Keyring({ type: 'sr25519', ss58Format }); + +// // Add the public key to the keyring +// const address = keyring.encodeAddress(publicKey); + +// return address +// } \ No newline at end of file diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts new file mode 100644 index 0000000000..48dc5c83c7 --- /dev/null +++ b/evm-tests/src/subtensor.ts @@ -0,0 +1,345 @@ +import * as assert from "assert"; +import { devnet, MultiAddress } from '@polkadot-api/descriptors'; +import { TypedApi, TxCallData } from 'polkadot-api'; +import { KeyPair } from "@polkadot-labs/hdkd-helpers" +import { getAliceSigner, 
waitForTransactionCompletion, getSignerFromKeypair } from './substrate' +import { convertH160ToSS58, convertPublicKeyToSs58 } from './address-utils' +import { tao } from './balance-math' + +// create a new subnet and return netuid +export async function addNewSubnetwork(api: TypedApi, hotkey: KeyPair, coldkey: KeyPair) { + const alice = getAliceSigner() + const totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() + + const rateLimit = await api.query.SubtensorModule.NetworkRateLimit.getValue() + if (rateLimit !== BigInt(0)) { + const internalCall = api.tx.AdminUtils.sudo_set_network_rate_limit({ rate_limit: BigInt(0) }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } + + const signer = getSignerFromKeypair(coldkey) + const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) + await waitForTransactionCompletion(api, registerNetworkTx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + assert.equal(totalNetworks + 1, await api.query.SubtensorModule.TotalNetworks.getValue()) + return totalNetworks +} + +// force set balance for a ss58 address +export async function forceSetBalanceToSs58Address(api: TypedApi, ss58Address: string) { + const alice = getAliceSigner() + const balance = tao(1e8) + const internalCall = api.tx.Balances.force_set_balance({ who: MultiAddress.Id(ss58Address), new_free: balance }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + const balanceOnChain = (await api.query.System.Account.getValue(ss58Address)).data.free + // check the balance except for sudo account becasue of tx fee + if (ss58Address !== 
convertPublicKeyToSs58(alice.publicKey)) { + assert.equal(balance, balanceOnChain) + } +} + +// set balance for an eth address +export async function forceSetBalanceToEthAddress(api: TypedApi, ethAddress: string) { + const ss58Address = convertH160ToSS58(ethAddress) + await forceSetBalanceToSs58Address(api, ss58Address) +} + +export async function setCommitRevealWeightsEnabled(api: TypedApi, netuid: number, enabled: boolean) { + const value = await api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid) + if (value === enabled) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.AdminUtils.sudo_set_commit_reveal_weights_enabled({ netuid: netuid, enabled: enabled }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(enabled, await api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid)) +} + +export async function setWeightsSetRateLimit(api: TypedApi, netuid: number, rateLimit: bigint) { + const value = await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid) + if (value === rateLimit) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.AdminUtils.sudo_set_weights_set_rate_limit({ netuid: netuid, weights_set_rate_limit: rateLimit }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(rateLimit, await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid)) +} + +// tempo is u16 in rust, but we just number in js. 
so value should be less than u16::Max +export async function setTempo(api: TypedApi, netuid: number, tempo: number) { + const value = await api.query.SubtensorModule.Tempo.getValue(netuid) + console.log("init avlue is ", value) + if (value === tempo) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.AdminUtils.sudo_set_tempo({ netuid: netuid, tempo: tempo }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(tempo, await api.query.SubtensorModule.Tempo.getValue(netuid)) +} + +export async function setCommitRevealWeightsInterval(api: TypedApi, netuid: number, interval: bigint) { + const value = await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid) + if (value === interval) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.AdminUtils.sudo_set_commit_reveal_weights_interval({ netuid: netuid, interval: interval }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(interval, await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid)) +} + + +export async function forceSetChainID(api: TypedApi, chainId: bigint) { + const value = await api.query.EVMChainId.ChainId.getValue() + if (value === chainId) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.AdminUtils.sudo_set_evm_chain_id({ chain_id: chainId }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(chainId, await api.query.EVMChainId.ChainId.getValue()) +} + +export async function 
disableWhiteListCheck(api: TypedApi, disabled: boolean) { + const value = await api.query.EVM.DisableWhitelistCheck.getValue() + if (value === disabled) { + return; + } + + const alice = getAliceSigner() + const internalCall = api.tx.EVM.disable_whitelist({ disabled: disabled }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(disabled, await api.query.EVM.DisableWhitelistCheck.getValue()) +} + +export async function burnedRegister(api: TypedApi, netuid: number, ss58Address: string, keypair: KeyPair) { + const uids = await api.query.SubtensorModule.SubnetworkN.getValue(netuid) + const signer = getSignerFromKeypair(keypair) + const tx = api.tx.SubtensorModule.burned_register({ hotkey: ss58Address, netuid: netuid }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(uids + 1, await api.query.SubtensorModule.SubnetworkN.getValue(netuid)) +} + + +export async function sendProxyCall(api: TypedApi, calldata: TxCallData, ss58Address: string, keypair: KeyPair) { + const signer = getSignerFromKeypair(keypair) + const tx = api.tx.Proxy.proxy({ + call: calldata, + real: MultiAddress.Id(ss58Address), + force_proxy_type: undefined + }); + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); +} + + +export async function setTxRateLimit(api: TypedApi, txRateLimit: bigint) { + const value = await api.query.SubtensorModule.TxRateLimit.getValue() + if (value === txRateLimit) { + return; + } + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_tx_rate_limit({ tx_rate_limit: txRateLimit }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + + await 
waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(txRateLimit, await api.query.SubtensorModule.TxRateLimit.getValue()) +} + +export async function setMaxAllowedValidators(api: TypedApi, netuid: number, maxAllowedValidators: number) { + const value = await api.query.SubtensorModule.MaxAllowedValidators.getValue(netuid) + if (value === maxAllowedValidators) { + return; + } + + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_max_allowed_validators({ + netuid: netuid, + max_allowed_validators: maxAllowedValidators + }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(maxAllowedValidators, await api.query.SubtensorModule.MaxAllowedValidators.getValue(netuid)) +} + +export async function setSubnetOwnerCut(api: TypedApi, subnetOwnerCut: number) { + const value = await api.query.SubtensorModule.SubnetOwnerCut.getValue() + if (value === subnetOwnerCut) { + return; + } + + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_subnet_owner_cut({ + subnet_owner_cut: subnetOwnerCut + }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(subnetOwnerCut, await api.query.SubtensorModule.SubnetOwnerCut.getValue()) +} + +export async function setActivityCutoff(api: TypedApi, netuid: number, activityCutoff: number) { + const value = await api.query.SubtensorModule.ActivityCutoff.getValue(netuid) + if (value === activityCutoff) { + return; + } + + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_activity_cutoff({ + netuid: netuid, + activity_cutoff: 
activityCutoff + }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(activityCutoff, await api.query.SubtensorModule.ActivityCutoff.getValue(netuid)) +} + +export async function setMaxAllowedUids(api: TypedApi, netuid: number, maxAllowedUids: number) { + const value = await api.query.SubtensorModule.MaxAllowedUids.getValue(netuid) + if (value === maxAllowedUids) { + return; + } + + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_max_allowed_uids({ + netuid: netuid, + max_allowed_uids: maxAllowedUids + }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(maxAllowedUids, await api.query.SubtensorModule.MaxAllowedUids.getValue(netuid)) +} + +export async function setMinDelegateTake(api: TypedApi, minDelegateTake: number) { + const value = await api.query.SubtensorModule.MinDelegateTake.getValue() + if (value === minDelegateTake) { + return; + } + + const alice = getAliceSigner() + + const internalCall = api.tx.AdminUtils.sudo_set_min_delegate_take({ + take: minDelegateTake + }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + assert.equal(minDelegateTake, await api.query.SubtensorModule.MinDelegateTake.getValue()) +} + +export async function becomeDelegate(api: TypedApi, ss58Address: string, keypair: KeyPair) { + const singer = getSignerFromKeypair(keypair) + + const tx = api.tx.SubtensorModule.become_delegate({ + hotkey: ss58Address + }) + await waitForTransactionCompletion(api, tx, singer) + .then(() => { }) + .catch((error) => { 
console.log(`transaction error ${error}`) }); +} + +export async function addStake(api: TypedApi, netuid: number, ss58Address: string, amount_staked: bigint, keypair: KeyPair) { + const singer = getSignerFromKeypair(keypair) + let tx = api.tx.SubtensorModule.add_stake({ + netuid: netuid, + hotkey: ss58Address, + amount_staked: amount_staked + }) + + await waitForTransactionCompletion(api, tx, singer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + +} + +export async function setWeight(api: TypedApi, netuid: number, dests: number[], weights: number[], version_key: bigint, keypair: KeyPair) { + const singer = getSignerFromKeypair(keypair) + let tx = api.tx.SubtensorModule.set_weights({ + netuid: netuid, + dests: dests, + weights: weights, + version_key: version_key + }) + + await waitForTransactionCompletion(api, tx, singer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + +} + +export async function rootRegister(api: TypedApi, ss58Address: string, keypair: KeyPair) { + const singer = getSignerFromKeypair(keypair) + let tx = api.tx.SubtensorModule.root_register({ + hotkey: ss58Address + }) + + await waitForTransactionCompletion(api, tx, singer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + +} \ No newline at end of file diff --git a/evm-tests/src/utils.ts b/evm-tests/src/utils.ts new file mode 100644 index 0000000000..36e922b49e --- /dev/null +++ b/evm-tests/src/utils.ts @@ -0,0 +1,55 @@ +import { defineChain, http, publicActions, createPublicClient } from "viem" +import { privateKeyToAccount, generatePrivateKey } from 'viem/accounts' +import { ethers } from "ethers" +import { ETH_LOCAL_URL } from "./config" + +export type ClientUrlType = 'http://localhost:9944'; + +export const chain = (id: number, url: string) => defineChain({ + id: id, + name: 'bittensor', + network: 'bittensor', + nativeCurrency: { + name: 'tao', + symbol: 'TAO', + decimals: 
9, + }, + rpcUrls: { + default: { + http: [url], + }, + }, + testnet: true, +}) + + +export async function getPublicClient(url: ClientUrlType) { + const wallet = createPublicClient({ + chain: chain(42, url), + transport: http(), + + }) + + return wallet.extend(publicActions) +} + +/** + * Generates a random Ethereum wallet + * @returns wallet keyring + */ +export function generateRandomEthWallet() { + let privateKey = generatePrivateKey().toString(); + privateKey = privateKey.replace('0x', ''); + + const account = privateKeyToAccount(`0x${privateKey}`) + return account +} + + +export function generateRandomEthersWallet() { + const account = ethers.Wallet.createRandom(); + const provider = new ethers.JsonRpcProvider(ETH_LOCAL_URL); + + const wallet = new ethers.Wallet(account.privateKey, provider); + return wallet; +} \ No newline at end of file diff --git a/evm-tests/test/ed25519.precompile.verify.test.ts b/evm-tests/test/ed25519.precompile.verify.test.ts new file mode 100644 index 0000000000..fcd79ec9d7 --- /dev/null +++ b/evm-tests/test/ed25519.precompile.verify.test.ts @@ -0,0 +1,122 @@ +import { IED25519VERIFY_ADDRESS, IEd25519VerifyABI, ETH_LOCAL_URL } from '../src/config' +import { getPublicClient } from "../src/utils"; +import { toHex, toBytes, keccak256, PublicClient } from 'viem' +import { Keyring } from "@polkadot/keyring"; +import * as assert from "assert"; + +describe("Verfication of ed25519 signature", () => { + // init eth part + let ethClient: PublicClient; + + before(async () => { + ethClient = await getPublicClient(ETH_LOCAL_URL); + }); + + it("Verification of ed25519 works", async () => { + const keyring = new Keyring({ type: "ed25519" }); + const alice = keyring.addFromUri("//Alice"); + + // Use this example: https://github.com/gztensor/evm-demo/blob/main/docs/ed25519verify-precompile.md + // const keyring = new Keyring({ type: "ed25519" }); + // const myAccount = keyring.addFromUri("//Alice"); + + 
//////////////////////////////////////////////////////////////////////
+        // Generate a signature
+
+        // Your message to sign
+        const message = "Sign this message";
+        const messageU8a = new TextEncoder().encode(message);
+        const messageHex = toHex(messageU8a); // Convert message to hex string
+        const messageHash = keccak256(messageHex); // Hash the message to fit into bytes32
+        console.log(`messageHash = ${messageHash}`);
+        const hashedMessageBytes = toBytes(messageHash);
+        console.log(`hashedMessageBytes = ${hashedMessageBytes}`);
+
+        // Sign the message
+        const signature = await alice.sign(hashedMessageBytes);
+        console.log(`Signature: ${toHex(signature)}`);
+
+        // Verify the signature locally
+        const isValid = alice.verify(
+            hashedMessageBytes,
+            signature,
+            alice.publicKey
+        );
+        console.log(`Is the signature valid? ${isValid}`);
+
+        //////////////////////////////////////////////////////////////////////
+        // Verify the signature using the precompile contract
+
+        const publicKeyBytes = toHex(alice.publicKey);
+        console.log(`publicKeyBytes = ${publicKeyBytes}`);
+
+        // Split signature into Commitment (R) and response (s)
+        let r = signature.slice(0, 32); // Commitment, a.k.a. "r" - first 32 bytes
+        let s = signature.slice(32, 64); // Response, a.k.a. "s" - second 32 bytes
+        let rBytes = toHex(r);
+        let sBytes = toHex(s);
+
+        const isPrecompileValid = await ethClient.readContract({
+            address: IED25519VERIFY_ADDRESS,
+            abi: IEd25519VerifyABI,
+            functionName: "verify",
+            args: [messageHash,
+                publicKeyBytes,
+                rBytes,
+                sBytes]
+
+        });
+
+        console.log(
+            `Is the signature valid according to the smart contract? 
${isPrecompileValid}` + ); + assert.equal(isPrecompileValid, true) + + ////////////////////////////////////////////////////////////////////// + // Verify the signature for bad data using the precompile contract + + let brokenHashedMessageBytes = hashedMessageBytes; + brokenHashedMessageBytes[0] = (brokenHashedMessageBytes[0] + 1) % 0xff; + const brokenMessageHash = toHex(brokenHashedMessageBytes); + console.log(`brokenMessageHash = ${brokenMessageHash}`); + + const isPrecompileValidBadData = await ethClient.readContract({ + address: IED25519VERIFY_ADDRESS, + abi: IEd25519VerifyABI, + functionName: "verify", + args: [brokenMessageHash, + publicKeyBytes, + rBytes, + sBytes] + + }); + + console.log( + `Is the signature valid according to the smart contract for broken data? ${isPrecompileValidBadData}` + ); + assert.equal(isPrecompileValidBadData, false) + + ////////////////////////////////////////////////////////////////////// + // Verify the bad signature for good data using the precompile contract + + let brokenR = r; + brokenR[0] = (brokenR[0] + 1) % 0xff; + rBytes = toHex(r); + const isPrecompileValidBadSignature = await ethClient.readContract({ + address: IED25519VERIFY_ADDRESS, + abi: IEd25519VerifyABI, + functionName: "verify", + args: [messageHash, + publicKeyBytes, + rBytes, + sBytes] + + }); + + console.log( + `Is the signature valid according to the smart contract for broken signature? 
${isPrecompileValidBadSignature}` + ); + assert.equal(isPrecompileValidBadSignature, false) + + }); +}); \ No newline at end of file diff --git a/evm-tests/test/eth.bridgeToken.deploy.test.ts b/evm-tests/test/eth.bridgeToken.deploy.test.ts new file mode 100644 index 0000000000..94ebcd1260 --- /dev/null +++ b/evm-tests/test/eth.bridgeToken.deploy.test.ts @@ -0,0 +1,69 @@ +import * as assert from "assert"; +import * as chai from "chai"; + +import { getDevnetApi } from "../src/substrate" +import { generateRandomEthersWallet, getPublicClient } from "../src/utils"; +import { ETH_LOCAL_URL } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PublicClient } from "viem"; +import { TypedApi } from "polkadot-api"; +import { BRIDGE_TOKEN_CONTRACT_ABI, BRIDGE_TOKEN_CONTRACT_BYTECODE } from "../src/contracts/bridgeToken"; +import { toViemAddress } from "../src/address-utils"; +import { forceSetBalanceToEthAddress, disableWhiteListCheck } from "../src/subtensor"; +import { ethers } from "ethers" +describe("bridge token contract deployment", () => { + // init eth part + const wallet = generateRandomEthersWallet(); + let publicClient: PublicClient; + + // init substrate part + let api: TypedApi + + before(async () => { + // init variables got from await and async + publicClient = await getPublicClient(ETH_LOCAL_URL) + api = await getDevnetApi() + + await forceSetBalanceToEthAddress(api, wallet.address) + await disableWhiteListCheck(api, true) + }); + + it("Can deploy bridge token smart contract", async () => { + const contractFactory = new ethers.ContractFactory(BRIDGE_TOKEN_CONTRACT_ABI, BRIDGE_TOKEN_CONTRACT_BYTECODE, wallet) + const contract = await contractFactory.deploy("name", + "symbol", wallet.address) + await contract.waitForDeployment() + assert.notEqual(contract.target, undefined) + + const contractAddress = contract.target.toString() + + const code = await publicClient.getCode({ address: toViemAddress(contractAddress) }) + if (code === 
undefined) { + throw new Error("code not available") + } + assert.ok(code.length > 100) + assert.ok(code.includes("0x60806040523480156")) + }); + + it("Can deploy bridge token contract with gas limit", async () => { + const contractFactory = new ethers.ContractFactory(BRIDGE_TOKEN_CONTRACT_ABI, BRIDGE_TOKEN_CONTRACT_BYTECODE, wallet) + const successful_gas_limit = "12345678"; + const contract = await contractFactory.deploy("name", + "symbol", wallet.address, + { + gasLimit: successful_gas_limit, + } + ) + await contract.waitForDeployment() + assert.notEqual(contract.target, undefined) + + const contractAddress = contract.target.toString() + + const code = await publicClient.getCode({ address: toViemAddress(contractAddress) }) + if (code === undefined) { + throw new Error("code not available") + } + assert.ok(code.length > 100) + assert.ok(code.includes("0x60806040523480156")) + }); +}); \ No newline at end of file diff --git a/evm-tests/test/eth.chain-id.test.ts b/evm-tests/test/eth.chain-id.test.ts new file mode 100644 index 0000000000..09174c1212 --- /dev/null +++ b/evm-tests/test/eth.chain-id.test.ts @@ -0,0 +1,76 @@ + +import * as assert from "assert"; +import * as chai from "chai"; + +import { getDevnetApi, waitForTransactionCompletion, getRandomSubstrateKeypair } from "../src/substrate" +import { generateRandomEthWallet, getPublicClient } from "../src/utils"; +import { convertPublicKeyToSs58 } from "../src/address-utils" +import { ETH_LOCAL_URL } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { getPolkadotSigner } from "polkadot-api/signer"; +import { PublicClient } from "viem"; +import { TypedApi } from "polkadot-api"; +import { forceSetBalanceToSs58Address, forceSetChainID } from "../src/subtensor"; + +describe("Test the EVM chain ID", () => { + // init eth part + const wallet = generateRandomEthWallet(); + let ethClient: PublicClient; + + // init substrate part + const keyPair = getRandomSubstrateKeypair(); + let api: 
TypedApi;
+
+    // init other variable
+    const initChainId = 42;
+
+    before(async () => {
+        // init variables got from await and async
+        ethClient = await getPublicClient(ETH_LOCAL_URL);
+        api = await getDevnetApi()
+        await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(keyPair.publicKey))
+
+    });
+
+    it("EVM chain id update is ok", async () => {
+        let chainId = await ethClient.getChainId();
+        // init chain id should be 42
+        assert.equal(chainId, initChainId);
+
+        const newChainId = BigInt(100)
+        await forceSetChainID(api, newChainId)
+
+        chainId = await ethClient.getChainId();
+        assert.equal(chainId, newChainId);
+
+        await forceSetChainID(api, BigInt(initChainId))
+
+        chainId = await ethClient.getChainId();
+        // back to original value for other tests. and we can run it repeatedly
+        assert.equal(chainId, initChainId);
+
+    });
+
+    it("EVM chain id is the same, only sudo can change it.", async () => {
+        let chainId = await ethClient.getChainId();
+        // init chain id should be 42
+        assert.equal(chainId, initChainId);
+
+        // invalid signer for set chain ID
+        let signer = getPolkadotSigner(
+            keyPair.publicKey,
+            "Sr25519",
+            keyPair.sign,
+        )
+
+        let tx = api.tx.AdminUtils.sudo_set_evm_chain_id({ chain_id: BigInt(100) })
+        await waitForTransactionCompletion(api, tx, signer)
+            .then(() => { })
+            .catch((error) => { console.log(`transaction error ${error}`) });
+
+        // extrinsic should fail and chain ID not updated. 
+ chainId = await ethClient.getChainId(); + assert.equal(chainId, 42); + + }); +}); \ No newline at end of file diff --git a/evm-tests/test/eth.incremental.deploy.test.ts b/evm-tests/test/eth.incremental.deploy.test.ts new file mode 100644 index 0000000000..c22187538d --- /dev/null +++ b/evm-tests/test/eth.incremental.deploy.test.ts @@ -0,0 +1,61 @@ + + +import * as assert from "assert"; +import * as chai from "chai"; + +import { getDevnetApi } from "../src/substrate" +import { generateRandomEthersWallet, getPublicClient } from "../src/utils"; +import { ETH_LOCAL_URL } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PublicClient } from "viem"; +import { TypedApi } from "polkadot-api"; +import { INCREMENTAL_CONTRACT_ABI, INCREMENTAL_CONTRACT_BYTECODE } from "../src/contracts/incremental"; +import { toViemAddress } from "../src/address-utils"; +import { ethers } from "ethers" +import { disableWhiteListCheck, forceSetBalanceToEthAddress } from "../src/subtensor"; + +describe("bridge token contract deployment", () => { + // init eth part + const wallet = generateRandomEthersWallet(); + let publicClient: PublicClient; + + // init substrate part + let api: TypedApi + + before(async () => { + publicClient = await getPublicClient(ETH_LOCAL_URL) + api = await getDevnetApi() + + await forceSetBalanceToEthAddress(api, wallet.address) + await disableWhiteListCheck(api, true) + }); + + it("Can deploy incremental smart contract", async () => { + const contractFactory = new ethers.ContractFactory(INCREMENTAL_CONTRACT_ABI, INCREMENTAL_CONTRACT_BYTECODE, wallet) + const contract = await contractFactory.deploy() + await contract.waitForDeployment() + + const value = await publicClient.readContract({ + abi: INCREMENTAL_CONTRACT_ABI, + address: toViemAddress(contract.target.toString()), + functionName: "retrieve", + args: [] + }) + assert.equal(value, 0) + + const newValue = 1234 + + const deployContract = new 
ethers.Contract(contract.target.toString(), INCREMENTAL_CONTRACT_ABI, wallet) + const storeTx = await deployContract.store(newValue) + await storeTx.wait() + + const newValueAfterStore = await publicClient.readContract({ + abi: INCREMENTAL_CONTRACT_ABI, + address: toViemAddress(contract.target.toString()), + functionName: "retrieve", + args: [] + }) + + assert.equal(newValue, newValueAfterStore) + }); +}); diff --git a/evm-tests/test/eth.substrate-transfer.test.ts b/evm-tests/test/eth.substrate-transfer.test.ts new file mode 100644 index 0000000000..9e3a2b2050 --- /dev/null +++ b/evm-tests/test/eth.substrate-transfer.test.ts @@ -0,0 +1,412 @@ +import * as assert from "assert"; + +import { getDevnetApi, waitForTransactionCompletion, getRandomSubstrateSigner, } from "../src/substrate" +import { getPublicClient } from "../src/utils"; +import { ETH_LOCAL_URL, IBALANCETRANSFER_ADDRESS, IBalanceTransferABI } from "../src/config"; +import { devnet, MultiAddress } from "@polkadot-api/descriptors" +import { PublicClient } from "viem"; +import { TypedApi, Binary, FixedSizeBinary } from "polkadot-api"; +import { generateRandomEthersWallet } from "../src/utils"; +import { tao, raoToEth, bigintToRao, compareEthBalanceWithTxFee } from "../src/balance-math"; +import { toViemAddress, convertPublicKeyToSs58, convertH160ToSS58, ss58ToH160, ss58ToEthAddress, ethAddressToH160 } from "../src/address-utils" +import { ethers } from "ethers" +import { estimateTransactionCost, getContract } from "../src/eth" + +import { WITHDRAW_CONTRACT_ABI, WITHDRAW_CONTRACT_BYTECODE } from "../src/contracts/withdraw" + +import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, disableWhiteListCheck } from "../src/subtensor"; + +describe("Balance transfers between substrate and EVM", () => { + const gwei = BigInt("1000000000"); + // init eth part + const wallet = generateRandomEthersWallet(); + const wallet2 = generateRandomEthersWallet(); + let publicClient: PublicClient; + const provider = 
new ethers.JsonRpcProvider(ETH_LOCAL_URL); + // init substrate part + const signer = getRandomSubstrateSigner(); + let api: TypedApi + + before(async () => { + + publicClient = await getPublicClient(ETH_LOCAL_URL) + api = await getDevnetApi() + + await forceSetBalanceToEthAddress(api, wallet.address) + await forceSetBalanceToEthAddress(api, wallet2.address) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(signer.publicKey)) + await disableWhiteListCheck(api, true) + }); + + it("Can transfer token from EVM to EVM", async () => { + const senderBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + const transferBalance = raoToEth(tao(1)) + const tx = { + to: wallet2.address, + value: transferBalance.toString() + } + const txFee = await estimateTransactionCost(provider, tx) + + const txResponse = await wallet.sendTransaction(tx) + await txResponse.wait(); + + + const senderBalanceAfterTransfer = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalanceAfterTranser = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + + assert.equal(senderBalanceAfterTransfer, senderBalance - transferBalance - txFee) + assert.equal(receiverBalance, receiverBalanceAfterTranser - transferBalance) + }); + + it("Can transfer token from Substrate to EVM", async () => { + const ss58Address = convertH160ToSS58(wallet.address) + const senderBalance = (await api.query.System.Account.getValue(ss58Address)).data.free + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const transferBalance = tao(1) + + const tx = api.tx.Balances.transfer_keep_alive({ value: transferBalance, dest: MultiAddress.Id(ss58Address) }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error 
${error}`) }); + + + const senderBalanceAfterTransfer = (await api.query.System.Account.getValue(ss58Address)).data.free + const receiverBalanceAfterTranser = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + + assert.equal(senderBalanceAfterTransfer, senderBalance + transferBalance) + assert.equal(receiverBalance, receiverBalanceAfterTranser - raoToEth(transferBalance)) + }); + + it("Can transfer token from EVM to Substrate", async () => { + const contract = getContract(IBALANCETRANSFER_ADDRESS, IBalanceTransferABI, wallet) + const senderBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalance = (await api.query.System.Account.getValue(convertPublicKeyToSs58(signer.publicKey))).data.free + const transferBalance = raoToEth(tao(1)) + + const tx = await contract.transfer(signer.publicKey, { value: transferBalance.toString() }) + await tx.wait() + + + const senderBalanceAfterTransfer = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalanceAfterTranser = (await api.query.System.Account.getValue(convertPublicKeyToSs58(signer.publicKey))).data.free + + compareEthBalanceWithTxFee(senderBalanceAfterTransfer, senderBalance - transferBalance) + assert.equal(receiverBalance, receiverBalanceAfterTranser - tao(1)) + }); + + it("Transfer from EVM to substrate using evm::withdraw", async () => { + const ss58Address = convertPublicKeyToSs58(signer.publicKey) + const senderBalance = (await api.query.System.Account.getValue(ss58Address)).data.free + const ethAddresss = ss58ToH160(ss58Address); + + // transfer token to mirror eth address + const ethTransfer = { + to: ss58ToEthAddress(ss58Address), + value: raoToEth(tao(2)).toString() + } + + const txResponse = await wallet.sendTransaction(ethTransfer) + await txResponse.wait(); + + const tx = api.tx.EVM.withdraw({ address: ethAddresss, value: tao(1) }) + const txFee = (await 
tx.getPaymentInfo(ss58Address)).partial_fee + + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + const senderBalanceAfterWithdraw = (await api.query.System.Account.getValue(ss58Address)).data.free + + assert.equal(senderBalance, senderBalanceAfterWithdraw - tao(1) + txFee) + }); + + it("Transfer from EVM to substrate using evm::call", async () => { + const ss58Address = convertPublicKeyToSs58(signer.publicKey) + const ethAddresss = ss58ToH160(ss58Address); + + // transfer token to mirror eth address + const ethTransfer = { + to: ss58ToEthAddress(ss58Address), + value: raoToEth(tao(2)).toString() + } + + const txResponse = await wallet.sendTransaction(ethTransfer) + await txResponse.wait(); + + const source: FixedSizeBinary<20> = ethAddresss; + const target = ethAddressToH160(wallet.address) + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + + // all these parameter value are tricky, any change could make the call failed + const tx = api.tx.EVM.call({ + source: source, + target: target, + // it is U256 in the extrinsic. + value: [raoToEth(tao(1)), tao(0), tao(0), tao(0)], + gas_limit: BigInt(1000000), + // it is U256 in the extrinsic. 
+ max_fee_per_gas: [BigInt(10e9), BigInt(0), BigInt(0), BigInt(0)], + max_priority_fee_per_gas: undefined, + input: Binary.fromText(""), + nonce: undefined, + access_list: [] + }) + // txFee not accurate + const txFee = (await tx.getPaymentInfo(ss58Address)).partial_fee + + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + + + const receiverBalanceAfterCall = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + assert.equal(receiverBalanceAfterCall, receiverBalance + raoToEth(tao(1))) + }); + + it("Forward value in smart contract", async () => { + + + const contractFactory = new ethers.ContractFactory(WITHDRAW_CONTRACT_ABI, WITHDRAW_CONTRACT_BYTECODE, wallet) + const contract = await contractFactory.deploy() + await contract.waitForDeployment() + + const code = await publicClient.getCode({ address: toViemAddress(contract.target.toString()) }) + if (code === undefined) { + throw new Error("code length is wrong for deployed contract") + } + assert.ok(code.length > 100) + + // transfer 2 TAO to contract + const ethTransfer = { + to: contract.target.toString(), + value: raoToEth(tao(2)).toString() + } + + const txResponse = await wallet.sendTransaction(ethTransfer) + await txResponse.wait(); + + const contractBalance = await publicClient.getBalance({ address: toViemAddress(contract.target.toString()) }) + const callerBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + + const contractForCall = new ethers.Contract(contract.target.toString(), WITHDRAW_CONTRACT_ABI, wallet) + + const withdrawTx = await contractForCall.withdraw( + raoToEth(tao(1)).toString() + ); + + await withdrawTx.wait(); + + const contractBalanceAfterWithdraw = await publicClient.getBalance({ address: toViemAddress(contract.target.toString()) }) + const callerBalanceAfterWithdraw = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + 
+ compareEthBalanceWithTxFee(callerBalanceAfterWithdraw, callerBalance + raoToEth(tao(1))) + assert.equal(contractBalance, contractBalanceAfterWithdraw + raoToEth(tao(1))) + }); + + it("Transfer full balance", async () => { + const ethBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + const tx = { + to: wallet2.address, + value: ethBalance.toString(), + }; + const txPrice = await estimateTransactionCost(provider, tx); + const finalTx = { + to: wallet2.address, + value: (ethBalance - txPrice).toString(), + }; + try { + // transfer should be failed since substrate requires existial balance to keep account + const txResponse = await wallet.sendTransaction(finalTx) + await txResponse.wait(); + } catch (error) { + if (error instanceof Error) { + assert.equal((error as any).code, "INSUFFICIENT_FUNDS") + assert.equal(error.toString().includes("insufficient funds"), true) + } + } + + const receiverBalanceAfterTransfer = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + assert.equal(receiverBalance, receiverBalanceAfterTransfer) + }) + + it("Transfer more than owned balance should fail", async () => { + const ethBalance = await publicClient.getBalance({ address: toViemAddress(wallet.address) }) + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + const tx = { + to: wallet2.address, + value: (ethBalance + raoToEth(tao(1))).toString(), + }; + + try { + // transfer should be failed since substrate requires existial balance to keep account + const txResponse = await wallet.sendTransaction(tx) + await txResponse.wait(); + } catch (error) { + if (error instanceof Error) { + assert.equal((error as any).code, "INSUFFICIENT_FUNDS") + assert.equal(error.toString().includes("insufficient funds"), true) + } + } + + const receiverBalanceAfterTransfer = await 
publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + assert.equal(receiverBalance, receiverBalanceAfterTransfer) + }); + + it("Transfer more than u64::max in substrate equivalent should receive error response", async () => { + const receiverBalance = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + try { + const tx = { + to: wallet2.address, + value: raoToEth(BigInt(2) ** BigInt(64)).toString(), + }; + // transfer should be failed since substrate requires existial balance to keep account + const txResponse = await wallet.sendTransaction(tx) + await txResponse.wait(); + } catch (error) { + if (error instanceof Error) { + assert.equal((error as any).code, "INSUFFICIENT_FUNDS") + assert.equal(error.toString().includes("insufficient funds"), true) + } + } + + const contract = getContract(IBALANCETRANSFER_ADDRESS, IBalanceTransferABI, wallet) + try { + const tx = await contract.transfer(signer.publicKey, { value: raoToEth(BigInt(2) ** BigInt(64)).toString() }) + await tx.await() + } catch (error) { + if (error instanceof Error) { + console.log(error.toString()) + assert.equal(error.toString().includes("revert data"), true) + } + } + + try { + const dest = convertH160ToSS58(wallet2.address) + const tx = api.tx.Balances.transfer_keep_alive({ value: bigintToRao(BigInt(2) ** BigInt(64)), dest: MultiAddress.Id(dest) }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } catch (error) { + if (error instanceof Error) { + console.log(error.toString()) + assert.equal(error.toString().includes("Cannot convert"), true) + } + } + + try { + const dest = ethAddressToH160(wallet2.address) + const tx = api.tx.EVM.withdraw({ value: bigintToRao(BigInt(2) ** BigInt(64)), address: dest }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } catch (error) { + if 
(error instanceof Error) { + assert.equal(error.toString().includes("Cannot convert"), true) + } + } + + try { + const source = ethAddressToH160(wallet.address) + const target = ethAddressToH160(wallet2.address) + const tx = api.tx.EVM.call({ + source: source, + target: target, + // it is U256 in the extrinsic, the value is more than u64::MAX + value: [raoToEth(tao(1)), tao(0), tao(0), tao(1)], + gas_limit: BigInt(1000000), + // it is U256 in the extrinsic. + max_fee_per_gas: [BigInt(10e9), BigInt(0), BigInt(0), BigInt(0)], + max_priority_fee_per_gas: undefined, + input: Binary.fromText(""), + nonce: undefined, + access_list: [] + }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } catch (error) { + if (error instanceof Error) { + console.log(error.toString()) + assert.equal((error as any).code, "INSUFFICIENT_FUNDS") + assert.equal(error.toString().includes("insufficient funds"), true) + } + } + + const receiverBalanceAfterTransfer = await publicClient.getBalance({ address: toViemAddress(wallet2.address) }) + assert.equal(receiverBalance, receiverBalanceAfterTransfer) + }); + + it("Gas price should be 10 GWei", async () => { + const feeData = await provider.getFeeData(); + assert.equal(feeData.gasPrice, BigInt(10000000000)); + }); + + + it("max_fee_per_gas and max_priority_fee_per_gas affect transaction fee properly", async () => { + + const testCases = [ + [10, 0, 21000 * 10 * 1e9], + [10, 10, 21000 * 10 * 1e9], + [11, 0, 21000 * 10 * 1e9], + [11, 1, (21000 * 10 + 21000) * 1e9], + [11, 2, (21000 * 10 + 21000) * 1e9], + ]; + + for (let i in testCases) { + const tc = testCases[i]; + const actualFee = await transferAndGetFee( + wallet, wallet2, publicClient, + gwei * BigInt(tc[0]), + gwei * BigInt(tc[1]) + ); + assert.equal(actualFee, BigInt(tc[2])) + } + }); + + it("Low max_fee_per_gas gets transaction rejected", async () => { + try { + await transferAndGetFee(wallet, 
wallet2, publicClient, gwei * BigInt(9), BigInt(0)) + } catch (error) { + if (error instanceof Error) { + console.log(error.toString()) + assert.equal(error.toString().includes("gas price less than block base fee"), true) + } + } + }); + + it("max_fee_per_gas lower than max_priority_fee_per_gas gets transaction rejected", async () => { + try { + await transferAndGetFee(wallet, wallet2, publicClient, gwei * BigInt(10), gwei * BigInt(11)) + } catch (error) { + if (error instanceof Error) { + assert.equal(error.toString().includes("priorityFee cannot be more than maxFee"), true) + } + } + }); +}); + +async function transferAndGetFee(wallet: ethers.Wallet, wallet2: ethers.Wallet, client: PublicClient, max_fee_per_gas: BigInt, max_priority_fee_per_gas: BigInt) { + + const ethBalanceBefore = await client.getBalance({ address: toViemAddress(wallet.address) }) + // Send TAO + const tx = { + to: wallet2.address, + value: raoToEth(tao(1)).toString(), + // EIP-1559 transaction parameters + maxPriorityFeePerGas: max_priority_fee_per_gas.toString(), + maxFeePerGas: max_fee_per_gas.toString(), + gasLimit: 21000, + }; + + // Send the transaction + const txResponse = await wallet.sendTransaction(tx); + await txResponse.wait() + + // Check balances + const ethBalanceAfter = await client.getBalance({ address: toViemAddress(wallet.address) }) + const fee = ethBalanceBefore - ethBalanceAfter - raoToEth(tao(1)) + + return fee; +} \ No newline at end of file diff --git a/evm-tests/test/metagraph.precompile.test.ts b/evm-tests/test/metagraph.precompile.test.ts new file mode 100644 index 0000000000..94c0df8861 --- /dev/null +++ b/evm-tests/test/metagraph.precompile.test.ts @@ -0,0 +1,147 @@ +import * as assert from "assert"; + +import { getAliceSigner, getClient, getDevnetApi, waitForTransactionCompletion, convertPublicKeyToMultiAddress, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { getPublicClient, } from "../src/utils"; +import { ETH_LOCAL_URL, 
SUB_LOCAL_URL, } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" +import { IMetagraphABI, IMETAGRAPH_ADDRESS } from "../src/contracts/metagraph" + +describe("Test the EVM chain ID", () => { + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + let publicClient: PublicClient; + + let api: TypedApi + + // sudo account alice as signer + let alice: PolkadotSigner; + + // init other variable + let subnetId = 0; + + before(async () => { + // init variables got from await and async + publicClient = await getPublicClient(ETH_LOCAL_URL) + const subClient = await getClient(SUB_LOCAL_URL) + api = await getDevnetApi() + alice = await getAliceSigner(); + + { + const multiAddress = convertPublicKeyToMultiAddress(hotkey.publicKey) + const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } + + { + const multiAddress = convertPublicKeyToMultiAddress(coldkey.publicKey) + const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) + const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) + + await waitForTransactionCompletion(api, tx, alice) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } + + const signer = getSignerFromKeypair(coldkey) + const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) + await waitForTransactionCompletion(api, registerNetworkTx, signer) + .then(() => { }) + .catch((error) => { 
console.log(`transaction error ${error}`) }); + + let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() + assert.ok(totalNetworks > 1) + subnetId = totalNetworks - 1 + + let uid_count = + await api.query.SubtensorModule.SubnetworkN.getValue(subnetId) + if (uid_count === 0) { + const tx = api.tx.SubtensorModule.burned_register({ hotkey: convertPublicKeyToSs58(hotkey.publicKey), netuid: subnetId }) + await waitForTransactionCompletion(api, tx, signer) + .then(() => { }) + .catch((error) => { console.log(`transaction error ${error}`) }); + } + }) + + it("Metagraph data access via precompile contract is ok", async () => { + const uid = 0 + const uid_count = await publicClient.readContract({ + abi: IMetagraphABI, + address: toViemAddress(IMETAGRAPH_ADDRESS), + functionName: "getUidCount", + args: [subnetId] + }) + // back to original value for other tests. and we can run it repeatedly + assert.ok(uid_count != undefined); + + // const axon = api.query.SubtensorModule.Axons.getValue() + + const axon = await publicClient.readContract({ + abi: IMetagraphABI, + address: toViemAddress(IMETAGRAPH_ADDRESS), + functionName: "getAxon", + args: [subnetId, uid] + }) + + assert.ok(axon != undefined); + if (axon instanceof Object) { + assert.ok(axon != undefined); + if ("block" in axon) { + assert.ok(axon.block != undefined); + } else { + throw new Error("block not included in axon") + } + + if ("version" in axon) { + assert.ok(axon.version != undefined); + } else { + throw new Error("version not included in axon") + } + + if ("ip" in axon) { + assert.ok(axon.ip != undefined); + } else { + throw new Error("ip not included in axon") + } + + if ("port" in axon) { + assert.ok(axon.port != undefined); + } else { + throw new Error("port not included in axon") + } + + if ("ip_type" in axon) { + assert.ok(axon.ip_type != undefined); + } else { + throw new Error("ip_type not included in axon") + } + + if ("protocol" in axon) { + assert.ok(axon.protocol != undefined); + 
} else { + throw new Error("protocol not included in axon") + } + } + + const methodList = ["getEmission", "getVtrust", "getValidatorStatus", "getLastUpdate", "getIsActive", + "getHotkey", "getColdkey" + ] + for (const method of methodList) { + const value = await publicClient.readContract({ + abi: IMetagraphABI, + address: toViemAddress(IMETAGRAPH_ADDRESS), + functionName: method, + args: [subnetId, uid] + }) + + assert.ok(value != undefined); + } + }); +}); \ No newline at end of file diff --git a/evm-tests/test/neuron.precompile.emission-check.test.ts b/evm-tests/test/neuron.precompile.emission-check.test.ts new file mode 100644 index 0000000000..ac609c1e27 --- /dev/null +++ b/evm-tests/test/neuron.precompile.emission-check.test.ts @@ -0,0 +1,72 @@ +import * as assert from "assert"; + +import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getPublicClient, } from "../src/utils"; +import { ETH_LOCAL_URL, SUB_LOCAL_URL, } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58, } from "../src/address-utils" +import { ethers } from "ethers" +import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" +import { generateRandomEthersWallet } from "../src/utils" +import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork } from "../src/subtensor" + +describe("Test the EVM chain ID", () => { + // init eth part + const wallet = generateRandomEthersWallet(); + + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + const hotkey2 = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + let publicClient: PublicClient; + + let api: TypedApi + + // sudo account alice as signer + let alice: PolkadotSigner; + + before(async () => { + // init variables got from await and async + publicClient = await 
getPublicClient(ETH_LOCAL_URL) + const subClient = await getClient(SUB_LOCAL_URL) + api = await getDevnetApi() + alice = await getAliceSigner(); + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey2.publicKey)) + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToEthAddress(api, wallet.address) + + const netuid = await addNewSubnetwork(api, hotkey2, coldkey) + console.log("test on subnet ", netuid) + }) + + it("Burned register and check emission", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const uid = await api.query.SubtensorModule.SubnetworkN.getValue(netuid) + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); + + const tx = await contract.burnedRegister( + netuid, + hotkey.publicKey + ); + await tx.wait(); + + const uidAfterNew = await api.query.SubtensorModule.SubnetworkN.getValue(netuid) + assert.equal(uid + 1, uidAfterNew) + + const key = await api.query.SubtensorModule.Keys.getValue(netuid, uid) + assert.equal(key, convertPublicKeyToSs58(hotkey.publicKey)) + + let i = 0; + while (i < 10) { + const emission = await api.query.SubtensorModule.PendingEmission.getValue(netuid) + + console.log("emission is ", emission); + await new Promise((resolve) => setTimeout(resolve, 2000)); + i += 1; + } + }) +}); \ No newline at end of file diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts new file mode 100644 index 0000000000..85125f0956 --- /dev/null +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -0,0 +1,142 @@ +import * as assert from "assert"; +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { PolkadotSigner, TypedApi } from "polkadot-api"; 
+import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils"
+import { Vec, Tuple, VecFixed, u16, u8, u64 } from "@polkadot/types-codec";
+import { TypeRegistry } from "@polkadot/types";
+import { ethers } from "ethers"
+import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron"
+import { generateRandomEthersWallet } from "../src/utils"
+import { convertH160ToPublicKey } from "../src/address-utils"
+import { blake2AsU8a } from "@polkadot/util-crypto"
+import {
+    forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setCommitRevealWeightsEnabled, setWeightsSetRateLimit, burnedRegister,
+    setTempo, setCommitRevealWeightsInterval
+} from "../src/subtensor"
+
+// hardcode some values for reveal hash
+// (single-entry weight vector: uid 1 gets weight 5; salt 9; version key 0 --
+// the same constants are passed to revealWeights later, so commit and reveal agree)
+const uids = [1];
+const values = [5];
+const salt = [9];
+const version_key = 0;
+
+// Builds the weight-commit hash for the H160 `address` on subnet `netuid`:
+// SCALE-encodes the tuple (32-byte account public key, u16 netuid, Vec<u16> uids,
+// Vec<u16> values, Vec<u16> salt, u64 version_key) via polkadot-js codecs, then
+// returns the blake2 hash of that encoding. The module-level constants above are
+// captured here, which is what makes the later reveal match this commit.
+function getCommitHash(netuid: number, address: string) {
+    const registry = new TypeRegistry();
+    let publicKey = convertH160ToPublicKey(address);
+
+    // Codec layout of the encoded tuple, element-for-element with the values below.
+    const tupleData = new Tuple(
+        registry,
+        [
+            VecFixed.with(u8, 32),
+            u16,
+            Vec.with(u16),
+            Vec.with(u16),
+            Vec.with(u16),
+            u64,
+        ],
+        [publicKey, netuid, uids, values, salt, version_key]
+    );
+
+    const hash = blake2AsU8a(tupleData.toU8a());
+    return hash;
+}
+
+describe("Test neuron precompile reveal weights", () => {
+    // init eth part
+    const wallet = generateRandomEthersWallet();
+
+    // init substrate part
+    const hotkey = getRandomSubstrateKeypair();
+    const coldkey = getRandomSubstrateKeypair();
+
+    // NOTE(review): the generic argument (presumably TypedApi<typeof devnet>)
+    // appears to have been stripped by extraction -- confirm against the original.
+    let api: TypedApi
+
+    // sudo account alice as signer
+    let alice: PolkadotSigner;
+    before(async () => {
+        // init variables got from await and async
+        api = await getDevnetApi()
+        alice = await getAliceSigner();
+
+        await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey))
+        await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey))
+        await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey))
+        await 
forceSetBalanceToEthAddress(api, wallet.address) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + + console.log("test the case on subnet ", netuid) + + // enable commit reveal feature + await setCommitRevealWeightsEnabled(api, netuid, true) + // set it as 0, we can set the weight anytime + await setWeightsSetRateLimit(api, netuid, BigInt(0)) + + const ss58Address = convertH160ToSS58(wallet.address) + await burnedRegister(api, netuid, ss58Address, coldkey) + + const uid = await api.query.SubtensorModule.Uids.getValue( + netuid, + ss58Address + ) + // eth wallet account should be the first neuron in the subnet + assert.equal(uid, uids[0]) + }) + + it("EVM neuron commit weights via call precompile", async () => { + let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() + const subnetId = totalNetworks - 1 + const commitHash = getCommitHash(subnetId, wallet.address) + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); + const tx = await contract.commitWeights(subnetId, commitHash) + await tx.wait() + + const ss58Address = convertH160ToSS58(wallet.address) + + const weightsCommit = await api.query.SubtensorModule.WeightCommits.getValue(subnetId, ss58Address) + if (weightsCommit === undefined) { + throw new Error("submit weights failed") + } + assert.ok(weightsCommit.length > 0) + }) + + it("EVM neuron reveal weights via call precompile", async () => { + let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() + const netuid = totalNetworks - 1 + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); + // set tempo or epoch large, then enough time to reveal weight + await setTempo(api, netuid, 60000) + // set interval epoch as 0, we can reveal at the same epoch + await setCommitRevealWeightsInterval(api, netuid, BigInt(0)) + + const tx = await contract.revealWeights( + netuid, + uids, + values, + salt, + version_key + ); + await tx.wait() + const ss58Address = 
convertH160ToSS58(wallet.address) + + // check the weight commit is removed after reveal successfully + const weightsCommit = await api.query.SubtensorModule.WeightCommits.getValue(netuid, ss58Address) + assert.equal(weightsCommit, undefined) + + // check the weight is set after reveal with correct uid + const neuron_uid = await api.query.SubtensorModule.Uids.getValue( + netuid, + ss58Address + ) + + const weights = await api.query.SubtensorModule.Weights.getValue(netuid, neuron_uid) + + if (weights === undefined) { + throw new Error("weights not available onchain") + } + for (const weight of weights) { + assert.equal(weight[0], neuron_uid) + assert.ok(weight[1] !== undefined) + } + }) +}); \ No newline at end of file diff --git a/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts b/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts new file mode 100644 index 0000000000..aee84f130c --- /dev/null +++ b/evm-tests/test/neuron.precompile.serve.axon-prometheus.test.ts @@ -0,0 +1,162 @@ +import * as assert from "assert"; +import { getAliceSigner, getClient, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { SUB_LOCAL_URL, } from "../src/config"; +import { devnet } from "@polkadot-api/descriptors" +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" +import { ethers } from "ethers" +import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" +import { generateRandomEthersWallet } from "../src/utils" +import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister } from "../src/subtensor" + +describe("Test neuron precompile Serve Axon Prometheus", () => { + // init eth part + const wallet1 = generateRandomEthersWallet(); + const wallet2 = generateRandomEthersWallet(); + const wallet3 = generateRandomEthersWallet(); + + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + 
const coldkey = getRandomSubstrateKeypair(); + + let api: TypedApi + + // sudo account alice as signer + let alice: PolkadotSigner; + before(async () => { + // init variables got from await and async + const subClient = await getClient(SUB_LOCAL_URL) + api = await getDevnetApi() + alice = await getAliceSigner(); + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToEthAddress(api, wallet1.address) + await forceSetBalanceToEthAddress(api, wallet2.address) + await forceSetBalanceToEthAddress(api, wallet3.address) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + + console.log("test the case on subnet ", netuid) + + await burnedRegister(api, netuid, convertH160ToSS58(wallet1.address), coldkey) + await burnedRegister(api, netuid, convertH160ToSS58(wallet2.address), coldkey) + await burnedRegister(api, netuid, convertH160ToSS58(wallet3.address), coldkey) + }) + + it("Serve Axon", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const version = 0; + const ip = 1; + const port = 2; + const ipType = 4; + const protocol = 0; + const placeholder1 = 8; + const placeholder2 = 9; + + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet1); + + const tx = await contract.serveAxon( + netuid, + version, + ip, + port, + ipType, + protocol, + placeholder1, + placeholder2 + ); + await tx.wait(); + + const axon = await api.query.SubtensorModule.Axons.getValue( + netuid, + convertH160ToSS58(wallet1.address) + ) + assert.notEqual(axon?.block, undefined) + assert.equal(axon?.version, version) + assert.equal(axon?.ip, ip) + assert.equal(axon?.port, port) + assert.equal(axon?.ip_type, ipType) + assert.equal(axon?.protocol, protocol) + assert.equal(axon?.placeholder1, placeholder1) + 
assert.equal(axon?.placeholder2, placeholder2) + }); + + it("Serve Axon TLS", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const version = 0; + const ip = 1; + const port = 2; + const ipType = 4; + const protocol = 0; + const placeholder1 = 8; + const placeholder2 = 9; + // certificate length is 65 + const certificate = new Uint8Array([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, + ]); + + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet2); + + const tx = await contract.serveAxonTls( + netuid, + version, + ip, + port, + ipType, + protocol, + placeholder1, + placeholder2, + certificate + ); + await tx.wait(); + + const axon = await api.query.SubtensorModule.Axons.getValue( + netuid, + convertH160ToSS58(wallet2.address)) + + assert.notEqual(axon?.block, undefined) + assert.equal(axon?.version, version) + assert.equal(axon?.ip, ip) + assert.equal(axon?.port, port) + assert.equal(axon?.ip_type, ipType) + assert.equal(axon?.protocol, protocol) + assert.equal(axon?.placeholder1, placeholder1) + assert.equal(axon?.placeholder2, placeholder2) + }); + + it("Serve Prometheus", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const version = 0; + const ip = 1; + const port = 2; + const ipType = 4; + + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet3); + + const tx = await contract.servePrometheus( + netuid, + version, + ip, + port, + ipType + ); + await tx.wait(); + + const prometheus = ( + await api.query.SubtensorModule.Prometheus.getValue( + netuid, + convertH160ToSS58(wallet3.address) + ) + ) + + assert.notEqual(prometheus?.block, undefined) + assert.equal(prometheus?.version, version) + assert.equal(prometheus?.ip, ip) 
+ assert.equal(prometheus?.port, port) + assert.equal(prometheus?.ip_type, ipType) + }); +}); \ No newline at end of file diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts new file mode 100644 index 0000000000..393c2b97b8 --- /dev/null +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -0,0 +1,65 @@ +import * as assert from "assert"; + +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { TypedApi } from "polkadot-api"; +import { convertH160ToSS58, convertPublicKeyToSs58, } from "../src/address-utils" +import { ethers } from "ethers" +import { INEURON_ADDRESS, INeuronABI } from "../src/contracts/neuron" +import { generateRandomEthersWallet } from "../src/utils" +import { + forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork, burnedRegister, setCommitRevealWeightsEnabled, + setWeightsSetRateLimit +} from "../src/subtensor" + +describe("Test neuron precompile contract, set weights function", () => { + // init eth part + const wallet = generateRandomEthersWallet(); + + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + + let api: TypedApi + + before(async () => { + api = await getDevnetApi() + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToEthAddress(api, wallet.address) + + const netuid = await addNewSubnetwork(api, hotkey, coldkey) + console.log("test on subnet ", netuid) + + await burnedRegister(api, netuid, convertH160ToSS58(wallet.address), coldkey) + const uid = await api.query.SubtensorModule.Uids.getValue(netuid, convertH160ToSS58(wallet.address)) + assert.notEqual(uid, undefined) + // disable reveal and enable direct set weights + await 
setCommitRevealWeightsEnabled(api, netuid, false) + await setWeightsSetRateLimit(api, netuid, BigInt(0)) + }) + + it("Set weights is ok", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const uid = await api.query.SubtensorModule.Uids.getValue(netuid, convertH160ToSS58(wallet.address)) + + const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); + const dests = [1]; + const weights = [2]; + const version_key = 0; + + const tx = await contract.setWeights(netuid, dests, weights, version_key); + + await tx.wait(); + const weightsOnChain = await api.query.SubtensorModule.Weights.getValue(netuid, uid) + + weightsOnChain.forEach((weight, _) => { + const uidInWeight = weight[0]; + const value = weight[1]; + assert.equal(uidInWeight, uid) + assert.ok(value > 0) + }); + }) +}); \ No newline at end of file diff --git a/evm-tests/test/staking.precompile.add-remove.test.ts b/evm-tests/test/staking.precompile.add-remove.test.ts new file mode 100644 index 0000000000..5387e62428 --- /dev/null +++ b/evm-tests/test/staking.precompile.add-remove.test.ts @@ -0,0 +1,326 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" +import { raoToEth, tao } from "../src/balance-math" +import { ethers } from "ethers" +import { generateRandomEthersWallet, getPublicClient } from "../src/utils" +import { convertH160ToPublicKey } from "../src/address-utils" +import { + forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, + sendProxyCall, +} from "../src/subtensor" +import { ETH_LOCAL_URL } from "../src/config"; +import { ISTAKING_ADDRESS, ISTAKING_V2_ADDRESS, IStakingABI, IStakingV2ABI } from "../src/contracts/staking" +import { PublicClient } from 
"viem"; + +describe("Test neuron precompile reveal weights", () => { + // init eth part + const wallet1 = generateRandomEthersWallet(); + const wallet2 = generateRandomEthersWallet(); + let publicClient: PublicClient; + // init substrate part + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const proxy = getRandomSubstrateKeypair(); + + let api: TypedApi + + // sudo account alice as signer + let alice: PolkadotSigner; + before(async () => { + publicClient = await getPublicClient(ETH_LOCAL_URL) + // init variables got from await and async + api = await getDevnetApi() + + // await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(proxy.publicKey)) + await forceSetBalanceToEthAddress(api, wallet1.address) + await forceSetBalanceToEthAddress(api, wallet2.address) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + + console.log("test the case on subnet ", netuid) + + await burnedRegister(api, netuid, convertH160ToSS58(wallet1.address), coldkey) + await burnedRegister(api, netuid, convertH160ToSS58(wallet2.address), coldkey) + }) + + it("Can add stake", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + // ETH unit + let stakeBalance = raoToEth(tao(20)) + const stakeBefore = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet1.address), netuid) + const contract = new ethers.Contract(ISTAKING_ADDRESS, IStakingABI, wallet1); + const tx = await contract.addStake(hotkey.publicKey, netuid, { value: stakeBalance.toString() }) + await tx.wait() + + const stakeFromContract = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), 
netuid) + ); + + assert.ok(stakeFromContract > stakeBefore) + const stakeAfter = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet1.address), netuid) + assert.ok(stakeAfter > stakeBefore) + }) + + it("Can add stake V2", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + // the unit in V2 is RAO, not ETH + let stakeBalance = tao(20) + const stakeBefore = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet2.address), netuid) + const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet2); + const tx = await contract.addStake(hotkey.publicKey, stakeBalance.toString(), netuid) + await tx.wait() + + const stakeFromContract = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet2.address), netuid) + ); + + assert.ok(stakeFromContract > stakeBefore) + const stakeAfter = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet2.address), netuid) + assert.ok(stakeAfter > stakeBefore) + }) + + it("Can not add stake if subnet doesn't exist", async () => { + // wrong netuid + let netuid = 12345; + let stakeBalance = raoToEth(tao(20)) + const stakeBefore = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet1.address), netuid) + const contract = new ethers.Contract(ISTAKING_ADDRESS, IStakingABI, wallet1); + try { + const tx = await contract.addStake(hotkey.publicKey, netuid, { value: stakeBalance.toString() }) + await tx.wait() + assert.fail("Transaction should have failed"); + } catch (error) { + // Transaction failed as expected + } + + const stakeFromContract = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid) + ); + assert.equal(stakeFromContract, stakeBefore) + const stakeAfter = await 
api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet1.address), netuid) + assert.equal(stakeAfter, stakeBefore) + }); + + it("Can not add stake V2 if subnet doesn't exist", async () => { + // wrong netuid + let netuid = 12345; + // the unit in V2 is RAO, not ETH + let stakeBalance = tao(20) + const stakeBefore = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet2.address), netuid) + const contract = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet2); + + try { + const tx = await contract.addStake(hotkey.publicKey, stakeBalance.toString(), netuid); + await tx.wait(); + assert.fail("Transaction should have failed"); + } catch (error) { + // Transaction failed as expected + } + + const stakeFromContract = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet2.address), netuid) + ); + assert.equal(stakeFromContract, stakeBefore) + const stakeAfter = await api.query.SubtensorModule.Alpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), convertH160ToSS58(wallet2.address), netuid) + assert.equal(stakeAfter, stakeBefore) + }) + + it("Can get stake via contract read method", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + // TODO need check how to pass bytes32 as parameter of readContract + // const value = await publicClient.readContract({ + // address: ISTAKING_ADDRESS, + // abi: IStakingABI, + // functionName: "getStake", + // args: [hotkey.publicKey, // Convert to bytes32 format + // convertH160ToPublicKey(wallet1.address), + // netuid] + // }) + // if (value === undefined || value === null) { + // throw new Error("value of getStake from contract is undefined") + // } + // const intValue = BigInt(value.toString()) + + const contractV1 = new ethers.Contract(ISTAKING_ADDRESS, IStakingABI, wallet1); + const stakeFromContractV1 = BigInt( + await 
contractV1.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid) + ); + + const contractV2 = new ethers.Contract(ISTAKING_V2_ADDRESS, IStakingV2ABI, wallet1); + // unit from contract V2 is RAO, not ETH + const stakeFromContractV2 = Number( + await contractV2.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid) + ); + + assert.equal(stakeFromContractV1, tao(stakeFromContractV2)) + + }) + + it("Can remove stake", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const contract = new ethers.Contract( + ISTAKING_ADDRESS, + IStakingABI, + wallet1 + ); + + const stakeBeforeRemove = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid) + ); + + let stakeBalance = raoToEth(tao(10)) + const tx = await contract.removeStake(hotkey.publicKey, stakeBalance, netuid) + await tx.wait() + + const stakeAfterRemove = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet1.address), netuid) + ); + assert.ok(stakeAfterRemove < stakeBeforeRemove) + + }) + + it("Can remove stake V2", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet2 + ); + + const stakeBeforeRemove = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet2.address), netuid) + ); + + let stakeBalance = tao(10) + const tx = await contract.removeStake(hotkey.publicKey, stakeBalance, netuid) + await tx.wait() + + const stakeAfterRemove = BigInt( + await contract.getStake(hotkey.publicKey, convertH160ToPublicKey(wallet2.address), netuid) + ); + + assert.ok(stakeAfterRemove < stakeBeforeRemove) + }) + + it("Can add/remove proxy", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + // add/remove are done in a single test case, because we can't use the same private/public 
key + // between substrate and EVM, but to test the remove part, we must predefine the proxy first. + // it makes `remove` being dependent on `add`, because we should use `addProxy` from contract + // to prepare the proxy for `removeProxy` testing - the proxy is specified for the + // caller/origin. + + // first, check we don't have proxies + const ss58Address = convertH160ToSS58(wallet1.address); + // the result include two items array, first one is delegate info, second one is balance + const initProxies = await api.query.Proxy.Proxies.getValue(ss58Address); + assert.equal(initProxies[0].length, 0); + + // intialize the contract + const contract = new ethers.Contract( + ISTAKING_ADDRESS, + IStakingABI, + wallet1 + ); + + // test "add" + let tx = await contract.addProxy(proxy.publicKey); + await tx.wait(); + + const proxiesAfterAdd = await api.query.Proxy.Proxies.getValue(ss58Address); + + assert.equal(proxiesAfterAdd[0][0].delegate, convertPublicKeyToSs58(proxy.publicKey)) + + let stakeBefore = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid + ) + + const call = api.tx.SubtensorModule.add_stake({ + hotkey: convertPublicKeyToSs58(hotkey.publicKey), + netuid: netuid, + amount_staked: tao(1) + }) + await sendProxyCall(api, call.decodedCall, ss58Address, proxy) + + let stakeAfter = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid + ) + + assert.ok(stakeAfter > stakeBefore) + // test "remove" + tx = await contract.removeProxy(proxy.publicKey); + await tx.wait(); + + const proxiesAfterRemove = await api.query.Proxy.Proxies.getValue(ss58Address); + assert.equal(proxiesAfterRemove[0].length, 0) + }); + + it("Can add/remove proxy V2", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + // add/remove are done in a single test case, because we can't use the same private/public key + // between 
substrate and EVM, but to test the remove part, we must predefine the proxy first. + // it makes `remove` being dependent on `add`, because we should use `addProxy` from contract + // to prepare the proxy for `removeProxy` testing - the proxy is specified for the + // caller/origin. + + // first, check we don't have proxies + const ss58Address = convertH160ToSS58(wallet1.address); + // the result include two items array, first one is delegate info, second one is balance + const initProxies = await api.query.Proxy.Proxies.getValue(ss58Address); + assert.equal(initProxies[0].length, 0); + + // intialize the contract + // const signer = new ethers.Wallet(fundedEthWallet.privateKey, provider); + const contract = new ethers.Contract( + ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1 + ); + + // test "add" + let tx = await contract.addProxy(proxy.publicKey); + await tx.wait(); + + const proxiesAfterAdd = await api.query.Proxy.Proxies.getValue(ss58Address); + + assert.equal(proxiesAfterAdd[0][0].delegate, convertPublicKeyToSs58(proxy.publicKey)) + + let stakeBefore = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid + ) + + const call = api.tx.SubtensorModule.add_stake({ + hotkey: convertPublicKeyToSs58(hotkey.publicKey), + netuid: netuid, + amount_staked: tao(1) + }) + + await sendProxyCall(api, call.decodedCall, ss58Address, proxy) + + let stakeAfter = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + ss58Address, + netuid + ) + + assert.ok(stakeAfter > stakeBefore) + // test "remove" + tx = await contract.removeProxy(proxy.publicKey); + await tx.wait(); + + const proxiesAfterRemove = await api.query.Proxy.Proxies.getValue(ss58Address); + assert.equal(proxiesAfterRemove[0].length, 0) + }); +}); diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts new file mode 100644 index 0000000000..3600a6d08d --- 
/dev/null +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -0,0 +1,105 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58 } from "../src/address-utils" +import { tao } from "../src/balance-math" +import { + forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, + setTxRateLimit, setTempo, setWeightsSetRateLimit, setSubnetOwnerCut, setMaxAllowedUids, + setMinDelegateTake, becomeDelegate, setActivityCutoff, addStake, setWeight, rootRegister +} from "../src/subtensor" + +describe("Test staking precompile rewards", () => { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + + const validator = getRandomSubstrateKeypair(); + const miner = getRandomSubstrateKeypair(); + const nominator = getRandomSubstrateKeypair(); + + let api: TypedApi + + before(async () => { + const root_netuid = 0; + const root_tempo = 1; // need root epoch to happen before subnet tempo + const subnet_tempo = 1; + api = await getDevnetApi() + + // await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(alice.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(validator.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(miner.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(nominator.publicKey)) + // await forceSetBalanceToEthAddress(api, wallet1.address) + // await forceSetBalanceToEthAddress(api, wallet2.address) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + + console.log("test the case on subnet ", netuid) + + await setTxRateLimit(api, BigInt(0)) + await setTempo(api, 
root_netuid, root_tempo) + await setTempo(api, netuid, subnet_tempo) + await setWeightsSetRateLimit(api, netuid, BigInt(0)) + + await burnedRegister(api, netuid, convertPublicKeyToSs58(validator.publicKey), coldkey) + await burnedRegister(api, netuid, convertPublicKeyToSs58(miner.publicKey), coldkey) + await burnedRegister(api, netuid, convertPublicKeyToSs58(nominator.publicKey), coldkey) + await setSubnetOwnerCut(api, 0) + await setActivityCutoff(api, netuid, 65535) + await setMaxAllowedUids(api, netuid, 65535) + await setMinDelegateTake(api, 0) + await becomeDelegate(api, convertPublicKeyToSs58(validator.publicKey), coldkey) + await becomeDelegate(api, convertPublicKeyToSs58(miner.publicKey), coldkey) + }) + + it("Staker receives rewards", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + await addStake(api, netuid, convertPublicKeyToSs58(miner.publicKey), tao(1), coldkey) + await addStake(api, netuid, convertPublicKeyToSs58(nominator.publicKey), tao(1), coldkey) + + await addStake(api, netuid, convertPublicKeyToSs58(validator.publicKey), tao(100), coldkey) + + const miner_alpha_before_emission = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(miner.publicKey), + convertPublicKeyToSs58(coldkey.publicKey), + netuid + ) + + await setWeight(api, netuid, [0, 1], [0xffff, 0xffff], BigInt(0), validator) + await rootRegister(api, convertPublicKeyToSs58(validator.publicKey), coldkey) + + let index = 0; + while (index < 60) { + const pending = await api.query.SubtensorModule.PendingEmission.getValue(netuid); + if (pending > 0) { + console.log("pending amount is ", pending); + break; + } + + await new Promise((resolve) => setTimeout(resolve, 1000)); + console.log("wait for the pendingEmission update"); + index += 1; + } + + index = 0; + while (index < 60) { + let miner_current_alpha = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(miner.publicKey), + 
convertPublicKeyToSs58(coldkey.publicKey), + netuid + ) + + if (miner_current_alpha > miner_alpha_before_emission) { + console.log("miner got reward"); + break; + } + + await new Promise((resolve) => setTimeout(resolve, 1000)); + console.log(" waiting for emission"); + index += 1; + } + }) +}) diff --git a/evm-tests/test/staking.precompile.stake-get.test.ts b/evm-tests/test/staking.precompile.stake-get.test.ts new file mode 100644 index 0000000000..37a23d8db2 --- /dev/null +++ b/evm-tests/test/staking.precompile.stake-get.test.ts @@ -0,0 +1,57 @@ +import * as assert from "assert"; +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58 } from "../src/address-utils" +import { tao } from "../src/balance-math" +import { + forceSetBalanceToSs58Address, addNewSubnetwork, addStake, +} from "../src/subtensor" +import { ethers } from "ethers"; +import { generateRandomEthersWallet } from "../src/utils" +import { ISTAKING_V2_ADDRESS, IStakingV2ABI } from "../src/contracts/staking" +import { log } from "console"; + +describe("Test staking precompile get methods", () => { + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + const wallet1 = generateRandomEthersWallet(); + + let api: TypedApi + + before(async () => { + api = await getDevnetApi() + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + console.log("will test in subnet: ", netuid) + }) + + it("Can get stake, validators and total alpha via contract read methods", async () => { + let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1 + + await addStake(api, netuid, convertPublicKeyToSs58(hotkey.publicKey), tao(1), coldkey) + + const contract = new ethers.Contract( 
ISTAKING_V2_ADDRESS, + IStakingV2ABI, + wallet1 + ); + + const stake = BigInt( + await contract.getStake(hotkey.publicKey, coldkey.publicKey, netuid) + ); + + // validator returned as bigint now. + const validators = + await contract.getAlphaStakedValidators(hotkey.publicKey, netuid) + + const alpha = BigInt( + await contract.getTotalAlphaStaked(hotkey.publicKey, netuid) + ); + assert.ok(stake > 0) + assert.equal(validators.length, 1) + assert.ok(alpha > 0) + + }) +}) diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts new file mode 100644 index 0000000000..1805b85ce9 --- /dev/null +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -0,0 +1,442 @@ +import * as assert from "assert"; + +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { devnet } from "@polkadot-api/descriptors" +import { TypedApi } from "polkadot-api"; +import { convertPublicKeyToSs58 } from "../src/address-utils" +import { generateRandomEthersWallet } from "../src/utils"; +import { ISubnetABI, ISUBNET_ADDRESS } from "../src/contracts/subnet" +import { ethers } from "ethers" +import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address } from "../src/subtensor" + +describe("Test the Subnet precompile contract", () => { + // init eth part + const wallet = generateRandomEthersWallet(); + // init substrate part + + const hotkey1 = getRandomSubstrateKeypair(); + const hotkey2 = getRandomSubstrateKeypair(); + let api: TypedApi + + before(async () => { + // init variables got from await and async + api = await getDevnetApi() + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey1.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey2.publicKey)) + await forceSetBalanceToEthAddress(api, wallet.address) + }) + + it("Can register network without identity info", async () => { + const totalNetwork = await 
api.query.SubtensorModule.TotalNetworks.getValue() + + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const tx = await contract.registerNetwork(hotkey1.publicKey); + await tx.wait(); + + const totalNetworkAfterAdd = await api.query.SubtensorModule.TotalNetworks.getValue() + assert.ok(totalNetwork + 1 === totalNetworkAfterAdd) + }); + + it("Can register network with identity info", async () => { + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const tx = await contract.registerNetwork(hotkey2.publicKey, + "name", + "repo", + "contact", + "subnetUrl", + "discord", + "description", + "additional" + ); + await tx.wait(); + + const totalNetworkAfterAdd = await api.query.SubtensorModule.TotalNetworks.getValue() + assert.ok(totalNetwork + 1 === totalNetworkAfterAdd) + }); + + it("Can set subnet parameter", async () => { + + const totalNetwork = await api.query.SubtensorModule.TotalNetworks.getValue() + const contract = new ethers.Contract(ISUBNET_ADDRESS, ISubnetABI, wallet); + const netuid = totalNetwork - 1; + + // servingRateLimit hyperparameter + { + const newValue = 100; + const tx = await contract.setServingRateLimit(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.ServingRateLimit.getValue(netuid) + + + let valueFromContract = Number( + await contract.getServingRateLimit(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // minDifficulty hyperparameter + // + // disabled: only by sudo + // + // newValue = 101; + // tx = await contract.setMinDifficulty(netuid, newValue); + // await tx.wait(); + + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.minDifficulty(netuid) + // ); + // }); + + // valueFromContract = Number(await contract.getMinDifficulty(netuid)); + + // 
expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); + + // maxDifficulty hyperparameter + + { + const newValue = 102; + const tx = await contract.setMaxDifficulty(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.MaxDifficulty.getValue(netuid) + + + let valueFromContract = Number( + await contract.getMaxDifficulty(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // weightsVersionKey hyperparameter + { + const newValue = 103; + const tx = await contract.setWeightsVersionKey(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.WeightsVersionKey.getValue(netuid) + + + let valueFromContract = Number( + await contract.getWeightsVersionKey(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + // weightsSetRateLimit hyperparameter + { + const newValue = 104; + const tx = await contract.setWeightsSetRateLimit(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.WeightsSetRateLimit.getValue(netuid) + + + let valueFromContract = Number( + await contract.getWeightsSetRateLimit(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // adjustmentAlpha hyperparameter + { + const newValue = 105; + const tx = await contract.setAdjustmentAlpha(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.AdjustmentAlpha.getValue(netuid) + + + let valueFromContract = Number( + await contract.getAdjustmentAlpha(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // maxWeightLimit hyperparameter + { + const newValue = 106; + const tx = await contract.setMaxWeightLimit(netuid, newValue); + await tx.wait(); + + let onchainValue = await 
api.query.SubtensorModule.MaxWeightsLimit.getValue(netuid) + + + let valueFromContract = Number( + await contract.getMaxWeightLimit(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + // immunityPeriod hyperparameter + { + const newValue = 107; + const tx = await contract.setImmunityPeriod(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.ImmunityPeriod.getValue(netuid) + + + let valueFromContract = Number( + await contract.getImmunityPeriod(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // minAllowedWeights hyperparameter + { + const newValue = 108; + const tx = await contract.setMinAllowedWeights(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.MinAllowedWeights.getValue(netuid) + + + let valueFromContract = Number( + await contract.getMinAllowedWeights(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // kappa hyperparameter + { + const newValue = 109; + const tx = await contract.setKappa(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.Kappa.getValue(netuid) + + + let valueFromContract = Number( + await contract.getKappa(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // rho hyperparameter + { + const newValue = 110; + const tx = await contract.setRho(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.Rho.getValue(netuid) + + + let valueFromContract = Number( + await contract.getRho(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // activityCutoff hyperparameter + { + const newValue = 111; + const tx = await contract.setActivityCutoff(netuid, newValue); + await 
tx.wait(); + + let onchainValue = await api.query.SubtensorModule.ActivityCutoff.getValue(netuid) + + + let valueFromContract = Number( + await contract.getActivityCutoff(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // networkRegistrationAllowed hyperparameter + { + const newValue = true; + const tx = await contract.setNetworkRegistrationAllowed(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.NetworkRegistrationAllowed.getValue(netuid) + + + let valueFromContract = Boolean( + await contract.getNetworkRegistrationAllowed(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // networkPowRegistrationAllowed hyperparameter + { + const newValue = true; + const tx = await contract.setNetworkPowRegistrationAllowed(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.NetworkPowRegistrationAllowed.getValue(netuid) + + + let valueFromContract = Boolean( + await contract.getNetworkPowRegistrationAllowed(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // minBurn hyperparameter. 
only sudo can set it now + // newValue = 112; + + // tx = await contract.setMinBurn(netuid, newValue); + // await tx.wait(); + + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.minBurn(netuid) + // ); + // }); + + // valueFromContract = Number(await contract.getMinBurn(netuid)); + + // expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); + + // maxBurn hyperparameter + { + const newValue = 113; + const tx = await contract.setMaxBurn(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.MaxBurn.getValue(netuid) + + + let valueFromContract = Number( + await contract.getMaxBurn(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + + // difficulty hyperparameter (disabled: sudo only) + // newValue = 114; + + // tx = await contract.setDifficulty(netuid, newValue); + // await tx.wait(); + + // await usingApi(async (api) => { + // onchainValue = Number( + // await api.query.subtensorModule.difficulty(netuid) + // ); + // }); + + // valueFromContract = Number(await contract.getDifficulty(netuid)); + + // expect(valueFromContract).to.eq(newValue); + // expect(valueFromContract).to.eq(onchainValue); + + // bondsMovingAverage hyperparameter + { + const newValue = 115; + const tx = await contract.setBondsMovingAverage(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.BondsMovingAverage.getValue(netuid) + + + let valueFromContract = Number( + await contract.getBondsMovingAverage(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + + // commitRevealWeightsEnabled hyperparameter + { + const newValue = true; + const tx = await contract.setCommitRevealWeightsEnabled(netuid, newValue); + await tx.wait(); + + let onchainValue = await 
api.query.SubtensorModule.CommitRevealWeightsEnabled.getValue(netuid) + + + let valueFromContract = Boolean( + await contract.getCommitRevealWeightsEnabled(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // liquidAlphaEnabled hyperparameter + { + const newValue = true; + const tx = await contract.setLiquidAlphaEnabled(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.LiquidAlphaOn.getValue(netuid) + + + let valueFromContract = Boolean( + await contract.getLiquidAlphaEnabled(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + + // alphaValues hyperparameter + { + const newValue = [118, 52429]; + const tx = await contract.setAlphaValues(netuid, newValue[0], newValue[1]); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.AlphaValues.getValue(netuid) + + let value = await contract.getAlphaValues(netuid) + let valueFromContract = [Number(value[0]), Number(value[1])] + + assert.equal(valueFromContract[0], newValue[0]) + assert.equal(valueFromContract[1], newValue[1]) + assert.equal(valueFromContract[0], onchainValue[0]); + assert.equal(valueFromContract[1], onchainValue[1]); + } + + // commitRevealWeightsInterval hyperparameter + { + const newValue = 119; + const tx = await contract.setCommitRevealWeightsInterval(netuid, newValue); + await tx.wait(); + + let onchainValue = await api.query.SubtensorModule.RevealPeriodEpochs.getValue(netuid) + + let valueFromContract = Number( + await contract.getCommitRevealWeightsInterval(netuid) + ); + + assert.equal(valueFromContract, newValue) + assert.equal(valueFromContract, onchainValue); + } + }) +}); \ No newline at end of file diff --git a/evm-tests/tsconfig.json b/evm-tests/tsconfig.json new file mode 100644 index 0000000000..c9c555d96f --- /dev/null +++ b/evm-tests/tsconfig.json @@ -0,0 +1,111 @@ +{ + "compilerOptions": { + /* Visit 
https://aka.ms/tsconfig to read more about this file */ + + /* Projects */ + // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */ + // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ + // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */ + // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */ + // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ + // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ + + /* Language and Environment */ + "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ + // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + // "jsx": "preserve", /* Specify what JSX code is generated. */ + // "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */ + // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ + // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */ + // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ + // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */ + // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. 
*/ + // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ + // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ + // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ + + /* Modules */ + "module": "commonjs", /* Specify what module code is generated. */ + // "rootDir": "./", /* Specify the root folder within your source files. */ + // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */ + // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ + // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ + // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ + // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */ + // "types": [], /* Specify type package names to be included without being referenced in a source file. */ + // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ + // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */ + // "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */ + // "rewriteRelativeImportExtensions": true, /* Rewrite '.ts', '.tsx', '.mts', and '.cts' file extensions in relative import paths to their JavaScript equivalent in output files. */ + // "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */ + // "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */ + // "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. 
*/ + // "noUncheckedSideEffectImports": true, /* Check side effect imports. */ + // "resolveJsonModule": true, /* Enable importing .json files. */ + // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */ + // "noResolve": true, /* Disallow 'import's, 'require's or ''s from expanding the number of files TypeScript should add to a project. */ + + /* JavaScript Support */ + // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ + // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ + // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ + + /* Emit */ + // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ + // "declarationMap": true, /* Create sourcemaps for d.ts files. */ + // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ + // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ + // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ + // "noEmit": true, /* Disable emitting files from a compilation. */ + // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */ + // "outDir": "./", /* Specify an output folder for all emitted files. */ + // "removeComments": true, /* Disable emitting comments. */ + // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ + // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. 
*/ + // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ + // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ + // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ + // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ + // "newLine": "crlf", /* Set the newline character for emitting files. */ + // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */ + // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */ + // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ + // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ + // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ + + /* Interop Constraints */ + // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ + // "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */ + // "isolatedDeclarations": true, /* Require sufficient annotation on exports so other tools can trivially generate declaration files. */ + // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */ + // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. 
*/ + "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ + + /* Type Checking */ + "strict": true, /* Enable all strict type-checking options. */ + // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ + // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ + // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ + // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ + // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ + // "strictBuiltinIteratorReturn": true, /* Built-in iterators are instantiated with a 'TReturn' type of 'undefined' instead of 'any'. */ + // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ + // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ + // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ + // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ + // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ + // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ + // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ + // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ + // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. 
*/ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ + // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ + // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ + + /* Completeness */ + // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ + "skipLibCheck": true /* Skip type checking all .d.ts files. */ + } +} diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 645eaa92c4..19bbbee73b 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -25,9 +25,13 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_support::traits::tokens::Balance; - use frame_support::{dispatch::DispatchResult, pallet_prelude::StorageMap}; + use frame_support::{ + dispatch::{DispatchResult, RawOrigin}, + pallet_prelude::StorageMap, + }; use frame_system::pallet_prelude::*; use pallet_evm_chain_id::{self, ChainId}; + use pallet_subtensor::utils::rate_limiting::TransactionType; use sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; @@ -249,12 +253,35 @@ pub mod pallet { netuid: u16, weights_version_key: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); + + if let Ok(RawOrigin::Signed(who)) = origin.into() { + // SN Owner + // Ensure the origin passes the rate limit. 
+ ensure!( + pallet_subtensor::Pallet::::passes_rate_limit_on_subnet( + &TransactionType::SetWeightsVersionKey, + &who, + netuid, + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + + // Set last transaction block + let current_block = pallet_subtensor::Pallet::::get_current_block_as_u64(); + pallet_subtensor::Pallet::::set_last_transaction_block_on_subnet( + &who, + netuid, + &TransactionType::SetWeightsVersionKey, + current_block, + ); + } + pallet_subtensor::Pallet::::set_weights_version_key(netuid, weights_version_key); log::debug!( "WeightsVersionKeySet( netuid: {:?} weights_version_key: {:?} ) ", @@ -265,7 +292,7 @@ pub mod pallet { } /// The extrinsic sets the weights set rate limit for a subnet. - /// It is only callable by the root account or subnet owner. + /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the weights set rate limit. #[pallet::call_index(7)] #[pallet::weight(::WeightInfo::sudo_set_weights_set_rate_limit())] @@ -274,7 +301,7 @@ pub mod pallet { netuid: u16, weights_set_rate_limit: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + ensure_root(origin)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -500,6 +527,12 @@ pub mod pallet { pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); + + ensure!( + activity_cutoff >= pallet_subtensor::MinActivityCutoff::::get(), + pallet_subtensor::Error::::ActivityCutoffTooLow + ); + pallet_subtensor::Pallet::::set_activity_cutoff(netuid, activity_cutoff); log::debug!( "ActivityCutoffSet( netuid: {:?} activity_cutoff: {:?} ) ", @@ -1414,6 +1447,35 @@ pub mod pallet { ); Ok(()) } + + /// + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `ema_alpha_period` - Number of blocks for EMA price to halve + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. 
+ /// + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. + #[pallet::call_index(65)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_ema_price_halving_period( + origin: OriginFor, + netuid: u16, + ema_halving: u64, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::EMAPriceHalvingBlocks::::set(netuid, ema_halving); + + log::debug!( + "EMAPriceHalvingBlocks( netuid: {:?}, ema_halving: {:?} )", + netuid, + ema_halving + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 0c443255c4..99c11b7165 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -86,7 +86,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty: u16 = 0; + pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. @@ -134,6 +134,8 @@ parameter_types! { pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialTaoWeight: u64 = u64::MAX/10; // 10% global weight. 
+ pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // 7 days } impl pallet_subtensor::Config for Test { @@ -197,6 +199,8 @@ impl pallet_subtensor::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; + type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index ae3aa022cc..2f4c3f2b51 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -5,7 +5,7 @@ use frame_support::{ traits::Hooks, }; use frame_system::Config; -use pallet_subtensor::Error as SubtensorError; +use pallet_subtensor::{Error as SubtensorError, SubnetOwner, Tempo, WeightsVersionKeyRateLimit}; // use pallet_subtensor::{migrations, Event}; use pallet_subtensor::Event; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -162,6 +162,107 @@ fn test_sudo_set_weights_version_key() { }); } +#[test] +fn test_sudo_set_weights_version_key_rate_limit() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let to_be_set: u64 = 10; + + let sn_owner = U256::from(1); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + let rate_limit = WeightsVersionKeyRateLimit::::get(); + let tempo: u16 = Tempo::::get(netuid); + + let rate_limit_period = rate_limit * (tempo as u64); + + assert_ok!(AdminUtils::sudo_set_weights_version_key( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + to_be_set + )); + assert_eq!(SubtensorModule::get_weights_version_key(netuid), to_be_set); + + // Try to set again with + // Assert rate limit not passed + 
assert!(!SubtensorModule::passes_rate_limit_on_subnet( + &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, + &sn_owner, + netuid + )); + + // Try transaction + assert_noop!( + AdminUtils::sudo_set_weights_version_key( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + to_be_set + 1 + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + + // Wait for rate limit to pass + run_to_block(rate_limit_period + 2); + assert!(SubtensorModule::passes_rate_limit_on_subnet( + &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, + &sn_owner, + netuid + )); + + // Try transaction + assert_ok!(AdminUtils::sudo_set_weights_version_key( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + to_be_set + 1 + )); + assert_eq!( + SubtensorModule::get_weights_version_key(netuid), + to_be_set + 1 + ); + }); +} + +#[test] +fn test_sudo_set_weights_version_key_rate_limit_root() { + // root should not be effected by rate limit + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let to_be_set: u64 = 10; + + let sn_owner = U256::from(1); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + let rate_limit = WeightsVersionKeyRateLimit::::get(); + let tempo: u16 = Tempo::::get(netuid); + + let rate_limit_period = rate_limit * (tempo as u64); + // Verify the rate limit is more than 0 blocks + assert!(rate_limit_period > 0); + + assert_ok!(AdminUtils::sudo_set_weights_version_key( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + assert_eq!(SubtensorModule::get_weights_version_key(netuid), to_be_set); + + // Try transaction + assert_ok!(AdminUtils::sudo_set_weights_version_key( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + to_be_set + 1 + )); + assert_eq!( + SubtensorModule::get_weights_version_key(netuid), + to_be_set + 1 + ); + }); +} + #[test] fn test_sudo_set_weights_set_rate_limit() { new_test_ext().execute_with(|| { @@ -546,7 +647,7 @@ fn test_sudo_set_rho() { fn 
test_sudo_set_activity_cutoff() { new_test_ext().execute_with(|| { let netuid: u16 = 1; - let to_be_set: u16 = 10; + let to_be_set: u16 = pallet_subtensor::MinActivityCutoff::::get(); add_network(netuid, 10); let init_value: u16 = SubtensorModule::get_activity_cutoff(netuid); assert_eq!( @@ -1568,3 +1669,45 @@ fn test_sudo_set_subnet_owner_hotkey() { ); }); } + +// cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_ema_halving --exact --show-output +#[test] +fn test_sudo_set_ema_halving() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let to_be_set: u64 = 10; + add_network(netuid, 10); + + let value_before: u64 = pallet_subtensor::EMAPriceHalvingBlocks::::get(netuid); + assert_eq!( + AdminUtils::sudo_set_ema_price_halving_period( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + to_be_set + ), + Err(DispatchError::BadOrigin) + ); + let value_after_0: u64 = pallet_subtensor::EMAPriceHalvingBlocks::::get(netuid); + assert_eq!(value_after_0, value_before); + + let owner = U256::from(10); + pallet_subtensor::SubnetOwner::::insert(netuid, owner); + assert_eq!( + AdminUtils::sudo_set_ema_price_halving_period( + <::RuntimeOrigin>::signed(owner), + netuid, + to_be_set + ), + Err(DispatchError::BadOrigin) + ); + let value_after_1: u64 = pallet_subtensor::EMAPriceHalvingBlocks::::get(netuid); + assert_eq!(value_after_1, value_before); + assert_ok!(AdminUtils::sudo_set_ema_price_halving_period( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + let value_after_2: u64 = pallet_subtensor::EMAPriceHalvingBlocks::::get(netuid); + assert_eq!(value_after_2, to_be_set); + }); +} diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index 7fb22aa1fb..7b2f49ace8 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -29,6 +29,18 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } enumflags2 = { workspace = true } +pallet-drand = { path = "../drand", default-features = 
false } +tle = { workspace = true, default-features = false } +ark-serialize = { workspace = true, default-features = false } +w3f-bls = { workspace = true, default-features = false } +rand_chacha = { workspace = true } +hex = { workspace = true } +sha2 = { workspace = true } + +log = { workspace = true } + +pallet-subtensor = { path = "../subtensor", default-features = false } + [dev-dependencies] sp-core = { workspace = true } sp-io = { workspace = true } @@ -47,18 +59,31 @@ std = [ "enumflags2/std", "pallet-balances/std", "sp-core/std", - "sp-io/std" + "sp-io/std", + "ark-serialize/std", + "log/std", + "pallet-drand/std", + "tle/std", + "w3f-bls/std", + "hex/std", + "rand_chacha/std", + "sha2/std", + "pallet-subtensor/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-balances/runtime-benchmarks" + "pallet-balances/runtime-benchmarks", + "pallet-drand/runtime-benchmarks", + "pallet-subtensor/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", - "sp-runtime/try-runtime" + "sp-runtime/try-runtime", + "pallet-drand/try-runtime", + "pallet-subtensor/try-runtime" ] diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index ba11dbe52a..11e1ae76ee 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -4,6 +4,9 @@ mod benchmarking; #[cfg(test)] mod tests; +#[cfg(test)] +mod mock; + pub mod types; pub mod weights; @@ -12,9 +15,18 @@ use subtensor_macros::freeze_struct; pub use types::*; pub use weights::WeightInfo; -use frame_support::traits::Currency; +use ark_serialize::CanonicalDeserialize; +use frame_support::{BoundedVec, traits::Currency}; +use scale_info::prelude::collections::BTreeSet; +use sp_runtime::SaturatedConversion; use sp_runtime::{Saturating, traits::Zero}; -use sp_std::boxed::Box; +use 
sp_std::{boxed::Box, vec::Vec}; +use tle::{ + curves::drand::TinyBLS381, + stream_ciphers::AESGCMStreamCipherProvider, + tlock::{TLECiphertext, tld}, +}; +use w3f_bls::EngineBLS; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -31,7 +43,7 @@ pub mod pallet { // Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config + pallet_drand::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -46,7 +58,7 @@ pub mod pallet { /// The maximum number of additional fields that can be added to a commitment #[pallet::constant] - type MaxFields: Get; + type MaxFields: Get + TypeInfo + 'static; /// The amount held on deposit for a registered identity #[pallet::constant] @@ -59,6 +71,15 @@ pub mod pallet { /// The rate limit for commitments #[pallet::constant] type DefaultRateLimit: Get>; + + /// Used to retreive the given subnet's tempo + type TempoInterface: GetTempoInterface; + } + + /// Used to retreive the given subnet's tempo + pub trait GetTempoInterface { + /// Used to retreive the epoch index for the given subnet. 
+ fn get_epoch_index(netuid: u16, cur_block: u64) -> u64; } #[pallet::event] @@ -71,6 +92,22 @@ pub mod pallet { /// The account who: T::AccountId, }, + /// A timelock-encrypted commitment was set + TimelockCommitment { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + /// The drand round to reveal + reveal_round: u64, + }, + /// A timelock-encrypted commitment was auto-revealed + CommitmentRevealed { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + }, } #[pallet::error] @@ -81,18 +118,28 @@ pub mod pallet { AccountNotAllowedCommit, /// Account is trying to commit data too fast, rate limit exceeded CommitmentSetRateLimitExceeded, + /// Space Limit Exceeded for the current interval + SpaceLimitExceeded, + /// Indicates that unreserve returned a leftover, which is unexpected. + UnexpectedUnreserveLeftover, } #[pallet::type_value] - /// Default value for commitment rate limit. + /// *DEPRECATED* Default value for commitment rate limit. pub fn DefaultRateLimit() -> BlockNumberFor { T::DefaultRateLimit::get() } - /// The rate limit for commitments + /// *DEPRECATED* The rate limit for commitments #[pallet::storage] pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; + /// Tracks all CommitmentOf that have at least one timelocked field. 
+ #[pallet::storage] + #[pallet::getter(fn timelocked_index)] + pub type TimelockedIndex = + StorageValue<_, BTreeSet<(u16, T::AccountId)>, ValueQuery>; + /// Identity data by account #[pallet::storage] #[pallet::getter(fn commitment_of)] @@ -117,16 +164,44 @@ pub mod pallet { BlockNumberFor, OptionQuery, >; + #[pallet::storage] + #[pallet::getter(fn revealed_commitments)] + pub(super) type RevealedCommitments = StorageDoubleMap< + _, + Identity, + u16, + Twox64Concat, + T::AccountId, + Vec<(Vec, u64)>, // Reveals<(Data, RevealBlock)> + OptionQuery, + >; + + /// Maps (netuid, who) -> usage (how many “bytes” they've committed) + /// in the RateLimit window + #[pallet::storage] + #[pallet::getter(fn used_space_of)] + pub type UsedSpaceOf = + StorageDoubleMap<_, Identity, u16, Twox64Concat, T::AccountId, UsageTracker, OptionQuery>; + + #[pallet::type_value] + /// The default Maximum Space + pub fn DefaultMaxSpace() -> u32 { + 3100 + } + + #[pallet::storage] + #[pallet::getter(fn max_space_per_user_per_rate_limit)] + pub type MaxSpace = StorageValue<_, u32, ValueQuery, DefaultMaxSpace>; #[pallet::call] impl Pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - T::WeightInfo::set_commitment(), - DispatchClass::Operational, - Pays::No - ))] + ::WeightInfo::set_commitment(), + DispatchClass::Operational, + Pays::No + ))] pub fn set_commitment( origin: OriginFor, netuid: u16, @@ -145,28 +220,49 @@ pub mod pallet { ); let cur_block = >::block_number(); - if let Some(last_commit) = >::get(netuid, &who) { - ensure!( - cur_block >= last_commit.saturating_add(RateLimit::::get()), - Error::::CommitmentSetRateLimitExceeded - ); + + let min_used_space: u64 = 100; + let required_space: u64 = info + .fields + .iter() + .map(|field| field.len_for_rate_limit()) + .sum::() + .max(min_used_space); + + let mut usage = UsedSpaceOf::::get(netuid, &who).unwrap_or_default(); + let cur_block_u64 = cur_block.saturated_into::(); + let current_epoch = 
T::TempoInterface::get_epoch_index(netuid, cur_block_u64); + + if usage.last_epoch != current_epoch { + usage.last_epoch = current_epoch; + usage.used_space = 0; } - let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); + let max_allowed = MaxSpace::::get() as u64; + ensure!( + usage.used_space.saturating_add(required_space) <= max_allowed, + Error::::SpaceLimitExceeded + ); + + usage.used_space = usage.used_space.saturating_add(required_space); + + UsedSpaceOf::::insert(netuid, &who, usage); + let mut id = match >::get(netuid, &who) { Some(mut id) => { - id.info = *info; + id.info = *info.clone(); id.block = cur_block; id } None => Registration { - info: *info, + info: *info.clone(), block: cur_block, deposit: Zero::zero(), }, }; let old_deposit = id.deposit; + let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); id.deposit = T::InitialDeposit::get().saturating_add(fd); if id.deposit > old_deposit { T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; @@ -174,12 +270,38 @@ pub mod pallet { if old_deposit > id.deposit { let err_amount = T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); - debug_assert!(err_amount.is_zero()); + if !err_amount.is_zero() { + return Err(Error::::UnexpectedUnreserveLeftover.into()); + } } >::insert(netuid, &who, id); >::insert(netuid, &who, cur_block); - Self::deposit_event(Event::Commitment { netuid, who }); + + if let Some(Data::TimelockEncrypted { reveal_round, .. }) = info + .fields + .iter() + .find(|data| matches!(data, Data::TimelockEncrypted { .. 
})) + { + Self::deposit_event(Event::TimelockCommitment { + netuid, + who: who.clone(), + reveal_round: *reveal_round, + }); + + TimelockedIndex::::mutate(|index| { + index.insert((netuid, who.clone())); + }); + } else { + Self::deposit_event(Event::Commitment { + netuid, + who: who.clone(), + }); + + TimelockedIndex::::mutate(|index| { + index.remove(&(netuid, who.clone())); + }); + } Ok(()) } @@ -187,7 +309,7 @@ pub mod pallet { /// Sudo-set the commitment rate limit #[pallet::call_index(1)] #[pallet::weight(( - T::WeightInfo::set_rate_limit(), + ::WeightInfo::set_rate_limit(), DispatchClass::Operational, Pays::No ))] @@ -196,6 +318,33 @@ pub mod pallet { RateLimit::::set(rate_limit_blocks.into()); Ok(()) } + + /// Sudo-set MaxSpace + #[pallet::call_index(2)] + #[pallet::weight(( + ::WeightInfo::set_rate_limit(), + DispatchClass::Operational, + Pays::No + ))] + pub fn set_max_space(origin: OriginFor, new_limit: u32) -> DispatchResult { + ensure_root(origin)?; + MaxSpace::::set(new_limit); + Ok(()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + if let Err(e) = Self::reveal_timelocked_commitments() { + log::debug!( + "Failed to unveil matured commitments on block {:?}: {:?}", + n, + e + ); + } + Weight::from_parts(0, 0) + } } } @@ -328,3 +477,150 @@ where Ok(()) } } + +impl Pallet { + pub fn reveal_timelocked_commitments() -> DispatchResult { + let index = TimelockedIndex::::get(); + for (netuid, who) in index.clone() { + let Some(mut registration) = >::get(netuid, &who) else { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + continue; + }; + + let original_fields = registration.info.fields.clone(); + let mut remain_fields = Vec::new(); + let mut revealed_fields = Vec::new(); + + for data in original_fields { + match data { + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let pulse = match pallet_drand::Pulses::::get(reveal_round) { + Some(p) => p, + 
None => { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + let sig_reader = &mut &signature_bytes[..]; + let sig = + ::SignatureGroup::deserialize_compressed( + sig_reader, + ) + .map_err(|e| { + log::warn!( + "Failed to deserialize drand signature for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(sig) = sig else { + log::warn!("No sig after deserialization"); + continue; + }; + + let reader = &mut &encrypted[..]; + let commit = TLECiphertext::::deserialize_compressed(reader) + .map_err(|e| { + log::warn!( + "Failed to deserialize TLECiphertext for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(commit) = commit else { + log::warn!("No commit after deserialization"); + continue; + }; + + let decrypted_bytes: Vec = + tld::(commit, sig) + .map_err(|e| { + log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) + }) + .ok() + .unwrap_or_default(); + + if decrypted_bytes.is_empty() { + log::warn!("Bytes were decrypted for {:?} but they are empty", who); + continue; + } + + revealed_fields.push(decrypted_bytes); + } + + other => remain_fields.push(other), + } + } + + if !revealed_fields.is_empty() { + let mut existing_reveals = + RevealedCommitments::::get(netuid, &who).unwrap_or_default(); + + let current_block = >::block_number(); + let block_u64 = current_block.saturated_into::(); + + // Push newly revealed items onto the tail of existing_reveals and emit the event + for revealed_bytes in revealed_fields { + existing_reveals.push((revealed_bytes, block_u64)); + + Self::deposit_event(Event::CommitmentRevealed { + netuid, + who: who.clone(), + }); + } + + const MAX_REVEALS: usize = 10; + if existing_reveals.len() > MAX_REVEALS { + let remove_count = existing_reveals.len().saturating_sub(MAX_REVEALS); + existing_reveals.drain(0..remove_count); + } + + RevealedCommitments::::insert(netuid, 
&who, existing_reveals); + } + + registration.info.fields = BoundedVec::try_from(remain_fields) + .map_err(|_| "Failed to build BoundedVec for remain_fields")?; + + match registration.info.fields.is_empty() { + true => { + >::remove(netuid, &who); + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + false => { + >::insert(netuid, &who, ®istration); + let has_timelock = registration + .info + .fields + .iter() + .any(|f| matches!(f, Data::TimelockEncrypted { .. })); + if !has_timelock { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + } + } + } + + Ok(()) + } +} diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 8866e1c0d5..c8f6b1e1b2 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -1,22 +1,33 @@ use crate as pallet_commitments; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + pallet_prelude::{Get, TypeInfo}, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, BuildStorage, + testing::Header, + traits::{BlakeTwo256, ConstU16, IdentityLookup}, }; -type Block = frame_system::mocking::MockBlock; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; -// Configure a mock runtime to test the pallet. 
frame_support::construct_runtime!( pub enum Test { System: frame_system = 1, - Commitments: pallet_commitments = 2, + Balances: pallet_balances = 2, + Commitments: pallet_commitments = 3, + Drand: pallet_drand = 4, } ); +pub type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -24,36 +35,239 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = ConstU16<42>; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; + type Block = Block; + type Nonce = u32; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct TestMaxFields; +impl Get for TestMaxFields { + fn get() -> u32 { + 16 + } +} +impl TypeInfo for TestMaxFields { + type Identity = Self; + fn type_info() -> scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("TestMaxFields", module_path!())) + .composite(scale_info::build::Fields::unit()) + } 
+} + +pub struct TestCanCommit; +impl pallet_commitments::CanCommit for TestCanCommit { + fn can_commit(_netuid: u16, _who: &u64) -> bool { + true + } } impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; + type Currency = Balances; type WeightInfo = (); - type MaxAdditionalFields = frame_support::traits::ConstU32<16>; - type CanRegisterIdentity = (); + type MaxFields = TestMaxFields; + type CanCommit = TestCanCommit; + type FieldDeposit = ConstU64<0>; + type InitialDeposit = ConstU64<0>; + type DefaultRateLimit = ConstU64<0>; + type TempoInterface = MockTempoInterface; +} + +pub struct MockTempoInterface; +impl pallet_commitments::GetTempoInterface for MockTempoInterface { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + let tempo = 360; // TODO: configure SubtensorModule in this mock + let tempo_plus_one: u64 = tempo.saturating_add(1); + let netuid_plus_one: u64 = (netuid as u64).saturating_add(1); + let block_with_offset: u64 = cur_block.saturating_add(netuid_plus_one); + + block_with_offset.checked_div(tempo_plus_one).unwrap_or(0) + } +} + +impl pallet_drand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_drand::weights::SubstrateWeight; + type AuthorityId = test_crypto::TestAuthId; + type Verifier = pallet_drand::verifier::QuicknetVerifier; + type UnsignedPriority = ConstU64<{ 1 << 20 }>; + type HttpFetchTimeout = ConstU64<1_000>; +} + +pub mod test_crypto { + use sp_core::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::IdentifyAccount, + }; + + pub const KEY_TYPE: sp_runtime::KeyTypeId = sp_runtime::KeyTypeId(*b"test"); + + app_crypto!(sr25519, KEY_TYPE); + + pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = Sr25519Signature; + type GenericPublic = Sr25519Public; + } + + impl IdentifyAccount for Public { + type 
AccountId = u64; + + fn into_account(self) -> u64 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(self.as_ref()); + u64::from_le_bytes(bytes[..8].try_into().expect("Expected to not panic")) + } + } +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = test_crypto::Public; + type Signature = test_crypto::Signature; +} + +impl frame_system::offchain::CreateSignedTransaction> for Test { + fn create_transaction>( + call: RuntimeCall, + _public: Self::Public, + account: Self::AccountId, + _nonce: u32, + ) -> Option<( + RuntimeCall, + ::SignaturePayload, + )> { + // Create a dummy sr25519 signature from a raw byte array + let dummy_raw = [0u8; 64]; + let dummy_signature = sp_core::sr25519::Signature::from(dummy_raw); + let signature = test_crypto::Signature::from(dummy_signature); + Some((call, (account, signature, ()))) + } +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; } -// Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::::default() + let t = frame_system::GenesisConfig::::default() .build_storage() - .unwrap() - .into() + .expect("Expected to not panic"); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +use super::*; +use crate::{EngineBLS, MAX_TIMELOCK_COMMITMENT_SIZE_BYTES, TinyBLS381}; +use ark_serialize::CanonicalSerialize; +use frame_support::BoundedVec; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use tle::{ibe::fullident::Identity, stream_ciphers::AESGCMStreamCipherProvider, tlock::tle}; + +// Drand Quicknet public key and signature for round=1000: +pub const DRAND_QUICKNET_PUBKEY_HEX: &str = "83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6\ + a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809b\ + d274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a"; +pub const DRAND_QUICKNET_SIG_2000_HEX: &str = "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; // round 20000 +pub const DRAND_QUICKNET_SIG_HEX: &str = "b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39"; + +/// Inserts a Drand pulse for `round` with the given `signature_bytes`. +pub fn insert_drand_pulse(round: u64, signature_bytes: &[u8]) { + let sig_bounded: BoundedVec> = signature_bytes + .to_vec() + .try_into() + .expect("Signature within 144 bytes"); + + let randomness_bounded: BoundedVec> = vec![0u8; 32] + .try_into() + .expect("Randomness must be exactly 32 bytes"); + + pallet_drand::Pulses::::insert( + round, + pallet_drand::types::Pulse { + round, + randomness: randomness_bounded, + signature: sig_bounded, + }, + ); +} + +/// Produces a **real** ciphertext by TLE-encrypting `plaintext` for Drand Quicknet `round`. 
+/// +/// The returned `BoundedVec>` +/// will decrypt if you pass in the valid signature for the same round. +pub fn produce_ciphertext( + plaintext: &[u8], + round: u64, +) -> BoundedVec> { + // 1) Deserialize the known Drand Quicknet public key: + let pub_key_bytes = hex::decode(DRAND_QUICKNET_PUBKEY_HEX).expect("decode pubkey"); + let pub_key = + ::PublicKeyGroup::deserialize_compressed(&pub_key_bytes[..]) + .expect("bad pubkey bytes"); + + // 2) Prepare the identity for that round + // by hashing round.to_be_bytes() with SHA256: + let msg = { + let mut hasher = sha2::Sha256::new(); + hasher.update(round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![msg]); + + // 3) Actually encrypt + // (just an example ephemeral secret key & RNG seed) + let esk = [2u8; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + + let ct = tle::( + pub_key, esk, plaintext, identity, rng, + ) + .expect("Encryption failed in produce_real_ciphertext"); + + // 4) Serialize the ciphertext to BoundedVec + let mut ct_bytes = Vec::new(); + ct.serialize_compressed(&mut ct_bytes) + .expect("serialize TLECiphertext"); + + ct_bytes.try_into().expect("Ciphertext is within max size") } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 15675d8ad8..c9b14d188b 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1,100 +1,1793 @@ -#![allow(non_camel_case_types)] - -use crate as pallet_commitments; -use frame_support::derive_impl; -use frame_support::traits::ConstU64; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, ConstU16, IdentityLookup}, +use codec::Encode; +use sp_std::prelude::*; + +#[cfg(test)] +use crate::{ + CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, + Registration, RevealedCommitments, TimelockedIndex, UsedSpaceOf, + mock::{ + Balances, DRAND_QUICKNET_SIG_2000_HEX, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, 
RuntimeOrigin, + Test, TestMaxFields, insert_drand_pulse, new_test_ext, produce_ciphertext, + }, +}; +use frame_support::pallet_prelude::Hooks; +use frame_support::{ + BoundedVec, assert_noop, assert_ok, + traits::{Currency, Get, ReservableCurrency}, }; +use frame_system::{Pallet as System, RawOrigin}; + +#[allow(clippy::indexing_slicing)] +#[test] +fn manual_data_type_info() { + let mut registry = scale_info::Registry::new(); + let type_id = registry.register_type(&scale_info::meta_type::()); + let registry: scale_info::PortableRegistry = registry.into(); + let type_info = registry.resolve(type_id.id).expect("Expected not to panic"); + + let check_type_info = |data: &Data| { + let variant_name = match data { + Data::None => "None".to_string(), + Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), + Data::Sha256(_) => "Sha256".to_string(), + Data::Keccak256(_) => "Keccak256".to_string(), + Data::ShaThree256(_) => "ShaThree256".to_string(), + Data::Raw(bytes) => format!("Raw{}", bytes.len()), + Data::TimelockEncrypted { .. 
} => "TimelockEncrypted".to_string(), + }; + if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { + let variant = variant + .variants + .iter() + .find(|v| v.name == variant_name) + .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); + + let encoded = data.encode(); + assert_eq!(encoded[0], variant.index); + + // For variants with fields, check the encoded length matches expected field lengths + if !variant.fields.is_empty() { + let expected_len = match data { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u32, + Data::BlakeTwo256(_) + | Data::Sha256(_) + | Data::Keccak256(_) + | Data::ShaThree256(_) => 32, + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + // Calculate length: encrypted (length prefixed) + reveal_round (u64) + let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix + let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes + encrypted_len + reveal_round_len + } + }; + assert_eq!( + encoded.len() as u32 - 1, // Subtract variant byte + expected_len, + "Encoded length mismatch for variant {}", + variant_name + ); + } else { + assert_eq!( + encoded.len() as u32 - 1, + 0, + "Expected no fields for {}", + variant_name + ); + } + } else { + panic!("Should be a variant type"); + } + }; + + let mut data = vec![ + Data::None, + Data::BlakeTwo256(Default::default()), + Data::Sha256(Default::default()), + Data::Keccak256(Default::default()), + Data::ShaThree256(Default::default()), + ]; + + // Add Raw instances for all possible sizes + for n in 0..128 { + data.push(Data::Raw( + vec![0u8; n as usize] + .try_into() + .expect("Expected not to panic"), + )); + } -pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + // Add a TimelockEncrypted instance + data.push(Data::TimelockEncrypted { + encrypted: vec![0u8; 64].try_into().expect("Expected not to panic"), + reveal_round: 12345, + }); 
-frame_support::construct_runtime!( - pub enum Test - { - System: frame_system = 1, - Balances: pallet_balances = 2, - Commitments: pallet_commitments = 3, + for d in data.iter() { + check_type_info(d); } -); - -#[allow(dead_code)] -pub type AccountId = u64; - -// The address format for describing accounts. -#[allow(dead_code)] -pub type Address = AccountId; - -// Balance of an account. -#[allow(dead_code)] -pub type Balance = u64; - -// An index to a block. -#[allow(dead_code)] -pub type BlockNumber = u64; - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - type Block = Block; - type Nonce = u64; -} - -impl pallet_commitments::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = (); - type MaxFields = frame_support::traits::ConstU32<16>; - type CanCommit = (); - 
type FieldDeposit = frame_support::traits::ConstU64<0>; - type InitialDeposit = frame_support::traits::ConstU64<0>; - type DefaultRateLimit = frame_support::traits::ConstU64<0>; -} - -// // Build genesis storage according to the mock runtime. -// pub fn new_test_ext() -> sp_io::TestExternalities { -// let t = frame_system::GenesisConfig::::default() -// .build_storage() -// .unwrap(); -// let mut ext = sp_io::TestExternalities::new(t); -// ext.execute_with(|| System::set_block_number(1)); -// ext +} + +#[test] +fn set_commitment_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + let commitment = Pallet::::commitment_of(1, 1).expect("Expected not to panic"); + let initial_deposit: u64 = ::InitialDeposit::get(); + assert_eq!(commitment.deposit, initial_deposit); + assert_eq!(commitment.block, 1); + assert_eq!(Pallet::::last_commitment(1, 1), Some(1)); + }); +} + +#[test] +#[should_panic(expected = "BoundedVec::try_from failed")] +fn set_commitment_too_many_fields_panics() { + new_test_ext().execute_with(|| { + let max_fields: u32 = ::MaxFields::get(); + let fields = vec![Data::None; (max_fields + 1) as usize]; + + // This line will panic when 'BoundedVec::try_from(...)' sees too many items. + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), + }); + + // We never get here, because the constructor panics above. 
+ let _ = Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + }); +} + +// DEPRECATED +// #[test] +// fn set_commitment_rate_limit_exceeded() { +// new_test_ext().execute_with(|| { +// let rate_limit = ::DefaultRateLimit::get(); +// System::::set_block_number(1); +// let info = Box::new(CommitmentInfo { +// fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), +// }); + +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info.clone() +// )); + +// // Set block number to just before rate limit expires +// System::::set_block_number(rate_limit); +// assert_noop!( +// Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), +// Error::::CommitmentSetRateLimitExceeded +// ); + +// // Set block number to after rate limit +// System::::set_block_number(rate_limit + 1); +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info +// )); +// }); // } + +#[test] +fn set_commitment_updates_deposit() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info1 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 2]) + .expect("Expected not to panic"), + }); + let info2 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 3]) + .expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info1 + )); + let initial_deposit: u64 = ::InitialDeposit::get(); + let field_deposit: u64 = ::FieldDeposit::get(); + let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + .deposit, + expected_deposit1 + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info2 + )); + let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + 
.deposit, + expected_deposit2 + ); + }); +} + +#[test] +fn set_rate_limit_works() { + new_test_ext().execute_with(|| { + let default_rate_limit: u64 = ::DefaultRateLimit::get(); + assert_eq!(RateLimit::::get(), default_rate_limit); + + assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); + assert_eq!(RateLimit::::get(), 200); + + assert_noop!( + Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), + sp_runtime::DispatchError::BadOrigin + ); + }); +} + +#[test] +fn event_emission_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + + let events = System::::events(); + assert!(events.iter().any(|e| matches!( + &e.event, + RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) + ))); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn happy_path_timelock_commitments() { + new_test_ext().execute_with(|| { + let message_text = b"Hello timelock only!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes for Raw variant"), + ); + let fields_vec = vec![data_raw]; + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(fields_vec).expect("Too many fields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let data = Data::TimelockEncrypted { + encrypted: encrypted.clone(), + reveal_round, + }; + + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Too many fields"); + let info_outer = CommitmentInfo { + fields: fields_outer, + }; + + let who = 123; + let netuid = 42; + System::::set_block_number(1); + + assert_ok!(Pallet::::set_commitment( 
+ RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = + hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Should have revealed data"); + + let (revealed_bytes, _reveal_block) = revealed[0].clone(); + + let revealed_str = sp_std::str::from_utf8(&revealed_bytes) + .expect("Expected valid UTF-8 in the revealed bytes for this test"); + + let original_str = + sp_std::str::from_utf8(message_text).expect("`message_text` is valid UTF-8"); + assert!( + revealed_str.contains(original_str), + "Revealed data must contain the original message text." + ); + }); +} + +#[test] +fn reveal_timelocked_commitment_missing_round_does_nothing() { + new_test_ext().execute_with(|| { + let who = 1; + let netuid = 2; + System::::set_block_number(5); + let ciphertext = produce_ciphertext(b"My plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + System::::set_block_number(100_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 9; + System::::set_block_number(10); + let good_ct = produce_ciphertext(b"Some data", 1000); + let mut corrupted = good_ct.into_inner(); + if !corrupted.is_empty() { + corrupted[0] = 0xFF; + } + let 
corrupted_ct = BoundedVec::try_from(corrupted).expect("Expected not to panic"); + let data = Data::TimelockEncrypted { + encrypted: corrupted_ct, + reveal_round: 1000, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(1000, &sig_bytes); + System::::set_block_number(99999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_bad_signature_skips_decryption() { + new_test_ext().execute_with(|| { + let who = 10; + let netuid = 11; + System::::set_block_number(15); + let real_ct = produce_ciphertext(b"A valid plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let bad_signature = [0x33u8; 10]; + insert_drand_pulse(1000, &bad_signature); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { + new_test_ext().execute_with(|| { + let who = 2; + let netuid = 3; + let commit_block = 100u64; + System::::set_block_number(commit_block); + let reveal_round = 1000; + let empty_ct = produce_ciphertext(&[], reveal_round); + let data = Data::TimelockEncrypted { + encrypted: empty_ct, + reveal_round, + }; + let fields = 
BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_commitment_single_field_entry_is_removed_after_reveal() { + new_test_ext().execute_with(|| { + let message_text = b"Single field timelock test!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("Message must be <=128 bytes for Raw variant"), + ); + + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data_raw]).expect("BoundedVec creation must not fail"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let timelock_data = Data::TimelockEncrypted { + encrypted, + reveal_round, + }; + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![timelock_data]).expect("Too many fields"); + let info_outer: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_outer, + }; + + let who = 555; + let netuid = 777; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Must decode DRAND_QUICKNET_SIG_HEX successfully"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + 
assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Expected to find revealed data"); + let (revealed_bytes, _reveal_block) = revealed[0].clone(); + + // The decrypted bytes have some extra SCALE metadata in front: + // we slice off the first two bytes before checking the string. + let offset = 2; + let truncated = &revealed_bytes[offset..]; + let revealed_str = sp_std::str::from_utf8(truncated) + .expect("Truncated bytes should be valid UTF-8 in this test"); + + let original_str = + sp_std::str::from_utf8(message_text).expect("`message_text` should be valid UTF-8"); + assert_eq!( + revealed_str, original_str, + "Expected the revealed data (minus prefix) to match the original message" + ); + assert!( + crate::CommitmentOf::::get(netuid, who).is_none(), + "Expected CommitmentOf entry to be removed after reveal" + ); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { + new_test_ext().execute_with(|| { + let round_1000 = 1000; + + // 2) Build two CommitmentInfos, one for each timelock + let msg_1 = b"Hello from TLE #1"; + let inner_1_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_1.to_vec().try_into().expect("expected not to panic"), + )]) + .expect("BoundedVec of size 1"); + let inner_info_1 = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let timelock_1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + let msg_2 = b"Hello from TLE #2"; + let inner_2_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_2.to_vec().try_into().expect("expected not to panic"), + )]) + .expect("BoundedVec of size 1"); + let inner_info_2 = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let 
ciphertext_2 = produce_ciphertext(&encoded_2, round_1000); + let timelock_2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_1000, + }; + + // 3) One plain Data::Raw field (non-timelocked) + let raw_bytes = b"Plain non-timelocked data"; + let data_raw = Data::Raw( + raw_bytes + .to_vec() + .try_into() + .expect("expected not to panic"), + ); + + // 4) Outer commitment: 3 fields total => [Raw, TLE #1, TLE #2] + let outer_fields = BoundedVec::try_from(vec![ + data_raw.clone(), + timelock_1.clone(), + timelock_2.clone(), + ]) + .expect("T::MaxFields >= 3 in the test config, or at least 3 here"); + let outer_info = CommitmentInfo { + fields: outer_fields, + }; + + // 5) Insert the commitment + let who = 123; + let netuid = 999; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(outer_info) + )); + let initial = Pallet::::commitment_of(netuid, who).expect("Must exist"); + assert_eq!(initial.info.fields.len(), 3, "3 fields inserted"); + + // 6) Insert Drand signature for round=1000 + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode DRAND sig"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + // 7) Reveal once + System::::set_block_number(50); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // => The pallet code has removed *both* TLE #1 and TLE #2 in this single call! 
+ let after_reveal = Pallet::::commitment_of(netuid, who) + .expect("Should still exist with leftover fields"); + // Only the raw, non-timelocked field remains + assert_eq!( + after_reveal.info.fields.len(), + 1, + "Both timelocks referencing round=1000 got removed at once" + ); + assert_eq!( + after_reveal.info.fields[0], data_raw, + "Only the raw field is left" + ); + + // 8) Check revealed data + let revealed_data = RevealedCommitments::::get(netuid, who) + .expect("Expected revealed data for TLE #1 and #2"); + + let (revealed_bytes1, reveal_block1) = revealed_data[0].clone(); + let (revealed_bytes2, reveal_block2) = revealed_data[1].clone(); + + let truncated1 = &revealed_bytes1[2..]; + let truncated2 = &revealed_bytes2[2..]; + + assert_eq!(truncated1, msg_1); + assert_eq!(reveal_block1, 50); + assert_eq!(truncated2, msg_2); + assert_eq!(reveal_block2, 50); + + // 9) A second reveal call now does nothing, because no timelocks remain + System::::set_block_number(51); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let after_second = Pallet::::commitment_of(netuid, who).expect("Still must exist"); + assert_eq!( + after_second.info.fields.len(), + 1, + "No new fields were removed, because no timelocks remain" + ); + }); +} + +#[test] +fn test_index_lifecycle_no_timelocks_updates_in_out() { + new_test_ext().execute_with(|| { + let netuid = 100; + let who = 999; + + // + // A) Create a commitment with **no** timelocks => shouldn't be in index + // + let no_tl_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![]).expect("Empty is ok"); + let info_no_tl = CommitmentInfo { + fields: no_tl_fields, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_no_tl) + )); + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User with no timelocks must not appear in index" + ); + + // + // B) Update the commitment to have a timelock => enters index + // + let tl_fields: BoundedVec<_, ::MaxFields> 
= + BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted: Default::default(), + reveal_round: 1234, + }]) + .expect("Expected success"); + let info_with_tl = CommitmentInfo { fields: tl_fields }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_with_tl) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "User must appear in index after adding a timelock" + ); + + // + // C) Remove the timelock => leaves index + // + let back_to_no_tl: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![]).expect("Expected success"); + let info_remove_tl = CommitmentInfo { + fields: back_to_no_tl, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_remove_tl) + )); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User must be removed from index after losing all timelocks" + ); + }); +} + +#[test] +fn two_timelocks_partial_then_full_reveal() { + new_test_ext().execute_with(|| { + let netuid_a = 1; + let who_a = 10; + let round_1000 = 1000; + let round_2000 = 2000; + + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected success"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + + // + // First Timelock => round=1000 + // + let msg_a1 = b"UserA timelock #1 (round=1000)"; + let inner_1_fields: BoundedVec::MaxFields> = BoundedVec::try_from( + vec![Data::Raw(msg_a1.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_1: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let tle_a1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + // + // Second Timelock => 
round=2000 + // + let msg_a2 = b"UserA timelock #2 (round=2000)"; + let inner_2_fields: BoundedVec::MaxFields> = BoundedVec::try_from( + vec![Data::Raw(msg_a2.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_2: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let ciphertext_2 = produce_ciphertext(&encoded_2, round_2000); + let tle_a2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_2000, + }; + + // + // Insert outer commitment with both timelocks + // + let fields_a: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_a1, tle_a2]).expect("2 fields, must be <= MaxFields"); + let info_a: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_a }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_a), + netuid_a, + Box::new(info_a) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A must be in index with 2 timelocks" + ); + + System::::set_block_number(10); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_a1 = CommitmentOf::::get(netuid_a, who_a).expect("still there"); + assert_eq!( + leftover_a1.info.fields.len(), + 1, + "Only the round=1000 timelock removed; round=2000 remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "Still in index with leftover timelock" + ); + + // + // Insert signature for round=2000 => final reveal => leftover=none => removed + // + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(11); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_a2 = CommitmentOf::::get(netuid_a, who_a); + assert!( + leftover_a2.is_none(), + "All timelocks removed => none leftover" + ); + assert!( + !TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A 
removed from index after final reveal" + ); + }); +} + +#[test] +fn single_timelock_reveal_later_round() { + new_test_ext().execute_with(|| { + let netuid_b = 2; + let who_b = 20; + let round_2000 = 2000; + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + let msg_b = b"UserB single timelock (round=2000)"; + + let inner_b_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw(msg_b.to_vec().try_into().expect("Expected success"))]) + .expect("MaxFields >= 1"); + let inner_info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_b_fields, + }; + let encoded_b = inner_info_b.encode(); + let ciphertext_b = produce_ciphertext(&encoded_b, round_2000); + let tle_b = Data::TimelockEncrypted { + encrypted: ciphertext_b, + reveal_round: round_2000, + }; + + let fields_b: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_b]).expect("1 field"); + let info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_b }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_b), + netuid_b, + Box::new(info_b) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B in index" + ); + + // Remove the round=2000 signature so first reveal does nothing + pallet_drand::Pulses::::remove(round_2000); + + System::::set_block_number(20); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_b1 = CommitmentOf::::get(netuid_b, who_b).expect("still there"); + assert_eq!( + leftover_b1.info.fields.len(), + 1, + "No signature => timelock remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "Still in index with leftover timelock" + ); + + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(21); + 
assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_b2 = CommitmentOf::::get(netuid_b, who_b); + assert!(leftover_b2.is_none(), "Timelock removed => leftover=none"); + assert!( + !TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B removed from index after final reveal" + ); + }); +} + +#[test] +fn tempo_based_space_limit_accumulates_in_same_window() { + new_test_ext().execute_with(|| { + let netuid = 1; + let who = 100; + let space_limit = 150; + MaxSpace::::set(space_limit); + System::::set_block_number(0); + + // A single commitment that uses some space, e.g. 30 bytes: + let data = vec![0u8; 30]; + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + data.try_into().expect("Data up to 128 bytes OK"), + )]) + .expect("1 field is <= MaxFields"), + }); + + // 2) First call => usage=0 => usage=30 after. OK. + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info.clone(), + )); + + // 3) Second call => tries another 30 bytes in the SAME block => total=60 => exceeds 50 => should fail. 
+ assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, info.clone()), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_resets_after_tempo() { + new_test_ext().execute_with(|| { + let netuid = 2; + let who = 101; + + MaxSpace::::set(250); + System::::set_block_number(1); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(200); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(360); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small + )); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_netuid() { + new_test_ext().execute_with(|| { + let netuid_a = 10; + let netuid_b = 20; + let who = 111; + let space_limit = 199; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_a, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + 
netuid_a, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_b, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid_b, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_user() { + new_test_ext().execute_with(|| { + let netuid = 10; + let user1 = 123; + let user2 = 456; + let space_limit = 199; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user2), + netuid, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(user2), netuid, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_sudo_set_max_space() { + new_test_ext().execute_with(|| { + let netuid = 3; + let who = 15; + MaxSpace::::set(100); + + System::::set_block_number(1); + let commit_25 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 25].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_25.clone() + )); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, commit_25.clone()), + 
Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_max_space(RuntimeOrigin::root(), 300)); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_25 + )); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn on_initialize_reveals_matured_timelocks() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 7; + let reveal_round = 1000; + + let message_text = b"Timelock test via on_initialize"; + + let inner_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes is OK for Data::Raw"), + )]) + .expect("Should not exceed MaxFields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_fields, + }; + + let plaintext = inner_info.encode(); + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let outer_fields = BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted, + reveal_round, + }]) + .expect("One field is well under MaxFields"); + let info_outer = CommitmentInfo { + fields: outer_fields, + }; + + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + assert!(CommitmentOf::::get(netuid, who).is_some()); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "Should appear in TimelockedIndex since it contains a timelock" + ); + + let drand_sig_hex = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Decoding DRAND_QUICKNET_SIG_HEX must not fail"); + insert_drand_pulse(reveal_round, &drand_sig_hex); + + assert!(RevealedCommitments::::get(netuid, who).is_none()); + + System::::set_block_number(2); + as Hooks>::on_initialize(2); + + let revealed_opt = RevealedCommitments::::get(netuid, who); + assert!( + revealed_opt.is_some(), + "Expected that the timelock got revealed at block #2" + ); + + let leftover = CommitmentOf::::get(netuid, who); + assert!( + leftover.is_none(), + "After 
revealing the only timelock, the entire commitment is removed." + ); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "No longer in TimelockedIndex after reveal." + ); + + let (revealed_bytes, reveal_block) = + revealed_opt.expect("expected to not panic")[0].clone(); + assert_eq!(reveal_block, 2, "Should have revealed at block #2"); + + let revealed_str = sp_std::str::from_utf8(&revealed_bytes) + .expect("Expected valid UTF-8 in the revealed bytes for this test"); + + let original_str = + sp_std::str::from_utf8(message_text).expect("`message_text` is valid UTF-8"); + assert!( + revealed_str.contains(original_str), + "Revealed data must contain the original message text." + ); + }); +} + +#[test] +fn set_commitment_unreserve_leftover_fails() { + new_test_ext().execute_with(|| { + use frame_system::RawOrigin; + + let netuid = 999; + let who = 99; + + Balances::make_free_balance_be(&who, 10_000); + + let fake_deposit = 100; + let dummy_info = CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("empty fields is fine"), + }; + let registration = Registration { + deposit: fake_deposit, + info: dummy_info, + block: 0u64.into(), + }; + + CommitmentOf::::insert(netuid, who, registration); + + assert_ok!(Balances::reserve(&who, fake_deposit)); + assert_eq!(Balances::reserved_balance(who), 100); + + Balances::unreserve(&who, 10_000); + assert_eq!(Balances::reserved_balance(who), 0); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("no fields is fine"), + }); + + assert_noop!( + Pallet::::set_commitment(RawOrigin::Signed(who).into(), netuid, commit_small), + Error::::UnexpectedUnreserveLeftover + ); + }); +} + +#[test] +fn timelocked_index_complex_scenario_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + + let netuid = 42; + let user_a = 1000; + let user_b = 2000; + let user_c = 3000; + + let make_timelock_data = |plaintext: &[u8], round: u64| { + let inner = 
CommitmentInfo:: { + fields: BoundedVec::try_from(vec![Data::Raw( + plaintext.to_vec().try_into().expect("<=128 bytes"), + )]) + .expect("1 field is fine"), + }; + let ct = produce_ciphertext(&inner.encode(), round); + Data::TimelockEncrypted { + encrypted: ct, + reveal_round: round, + } + }; + + let make_raw_data = + |payload: &[u8]| Data::Raw(payload.to_vec().try_into().expect("expected to not panic")); + + // ---------------------------------------------------- + // (1) USER A => no timelocks => NOT in index + // ---------------------------------------------------- + let info_a1 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![make_raw_data(b"A-regular")]) + .expect("1 field is fine"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_a), + netuid, + Box::new(info_a1), + )); + assert!( + !TimelockedIndex::::get().contains(&(netuid, user_a)), + "A has no timelocks => not in TimelockedIndex" + ); + + // ---------------------------------------------------- + // (2) USER B => Single TLE => BUT USE round=2000! 
+ // => B is in index + // ---------------------------------------------------- + let b_timelock_1 = make_timelock_data(b"B first TLE", 2000); + let info_b1 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![b_timelock_1]).expect("Single TLE is fine"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_b), + netuid, + Box::new(info_b1), + )); + let idx = TimelockedIndex::::get(); + assert!(!idx.contains(&(netuid, user_a)), "A not in index"); + assert!(idx.contains(&(netuid, user_b)), "B in index (has TLE)"); + + // ---------------------------------------------------- + // (3) USER A => 2 timelocks: round=1000 & round=2000 + // => A is in index + // ---------------------------------------------------- + let a_timelock_1 = make_timelock_data(b"A TLE #1", 1000); + let a_timelock_2 = make_timelock_data(b"A TLE #2", 2000); + let info_a2 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![a_timelock_1, a_timelock_2]) + .expect("2 TLE fields OK"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_a), + netuid, + Box::new(info_a2), + )); + + let idx = TimelockedIndex::::get(); + assert!(idx.contains(&(netuid, user_a)), "A in index"); + assert!(idx.contains(&(netuid, user_b)), "B still in index"); + + // ---------------------------------------------------- + // (4) USER B => remove all timelocks => B out of index + // ---------------------------------------------------- + let info_b2 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![make_raw_data(b"B back to raw")]) + .expect("no TLE => B out"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_b), + netuid, + Box::new(info_b2), + )); + let idx = TimelockedIndex::::get(); + assert!(idx.contains(&(netuid, user_a)), "A remains"); + assert!( + !idx.contains(&(netuid, user_b)), + "B removed after losing TLEs" + ); + + // ---------------------------------------------------- + // (5) USER B => re-add TLE => round=2000 => back in index + // 
---------------------------------------------------- + let b_timelock_2 = make_timelock_data(b"B TLE #2", 2000); + let info_b3 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![b_timelock_2]).expect("expected to not panic"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_b), + netuid, + Box::new(info_b3), + )); + let idx = TimelockedIndex::::get(); + assert!(idx.contains(&(netuid, user_a)), "A in index"); + assert!(idx.contains(&(netuid, user_b)), "B back in index"); + + // ---------------------------------------------------- + // (6) USER C => sets 1 TLE => round=2000 => in index + // ---------------------------------------------------- + let c_timelock_1 = make_timelock_data(b"C TLE #1", 2000); + let info_c1 = CommitmentInfo:: { + fields: BoundedVec::try_from(vec![c_timelock_1]).expect("expected to not panic"), + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user_c), + netuid, + Box::new(info_c1), + )); + let idx = TimelockedIndex::::get(); + assert!(idx.contains(&(netuid, user_a)), "A"); + assert!(idx.contains(&(netuid, user_b)), "B"); + assert!(idx.contains(&(netuid, user_c)), "C"); + + // ---------------------------------------------------- + // (7) Partial reveal for round=1000 => affects only A + // because B & C have round=2000 + // ---------------------------------------------------- + let drand_sig_1000 = + hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode signature for round=1000"); + insert_drand_pulse(1000, &drand_sig_1000); + + System::::set_block_number(10); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // After revealing round=1000: + // - A: Loses TLE #1 (1000), still has TLE #2 (2000) => remains in index + // - B: referencing 2000 => unaffected => remains + // - C: referencing 2000 => remains + let idx = TimelockedIndex::::get(); + assert!( + idx.contains(&(netuid, user_a)), + "A has leftover round=2000 => remains in index" + ); + assert!(idx.contains(&(netuid, user_b)), "B 
unaffected"); + assert!(idx.contains(&(netuid, user_c)), "C unaffected"); + + // ---------------------------------------------------- + // (8) Reveal round=2000 => fully remove A, B, and C + // ---------------------------------------------------- + let drand_sig_2000 = + hex::decode(DRAND_QUICKNET_SIG_2000_HEX).expect("decode signature for round=2000"); + insert_drand_pulse(2000, &drand_sig_2000); + + System::::set_block_number(11); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // Now: + // - A's final TLE (#2 at 2000) is removed => A out + // - B had 2000 => out + // - C had 2000 => out + let idx = TimelockedIndex::::get(); + assert!( + !idx.contains(&(netuid, user_a)), + "A removed after 2000 reveal" + ); + assert!( + !idx.contains(&(netuid, user_b)), + "B removed after 2000 reveal" + ); + assert!( + !idx.contains(&(netuid, user_c)), + "C removed after 2000 reveal" + ); + + assert_eq!(idx.len(), 0, "All users revealed => index is empty"); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_bad_timelocks_are_removed() { + new_test_ext().execute_with(|| { + // + // 1) Prepare multiple Data::TimelockEncrypted fields with different “badness” scenarios + one good field + // + // Round used for valid Drand signature + let valid_round = 1000; + // Round used for intentionally invalid Drand signature + let invalid_sig_round = 999; + // Round that has *no* Drand pulse => timelock remains stored, not revealed yet + let no_pulse_round = 2001; + + // (a) TLE #1: Round=999 => Drand pulse *exists* but signature is invalid => skip/deleted + let plaintext_1 = b"BadSignature"; + let ciphertext_1 = produce_ciphertext(plaintext_1, invalid_sig_round); + let tle_bad_sig = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: invalid_sig_round, + }; + + // (b) TLE #2: Round=1000 => Drand signature is valid, but ciphertext is corrupted => skip/deleted + let plaintext_2 = b"CorruptedCiphertext"; + let good_ct_2 = 
produce_ciphertext(plaintext_2, valid_round); + let mut corrupted_ct_2 = good_ct_2.into_inner(); + if !corrupted_ct_2.is_empty() { + corrupted_ct_2[0] ^= 0xFF; // flip a byte + } + let tle_corrupted = Data::TimelockEncrypted { + encrypted: corrupted_ct_2.try_into().expect("Expected not to panic"), + reveal_round: valid_round, + }; + + // (c) TLE #3: Round=1000 => Drand signature valid, ciphertext good, *but* plaintext is empty => skip/deleted + let empty_good_ct = produce_ciphertext(&[], valid_round); + let tle_empty_plaintext = Data::TimelockEncrypted { + encrypted: empty_good_ct, + reveal_round: valid_round, + }; + + // (d) TLE #4: Round=1000 => Drand signature valid, ciphertext valid, nonempty plaintext => should be revealed + let plaintext_4 = b"Hello, I decrypt fine!"; + let good_ct_4 = produce_ciphertext(plaintext_4, valid_round); + let tle_good = Data::TimelockEncrypted { + encrypted: good_ct_4, + reveal_round: valid_round, + }; + + // (e) TLE #5: Round=2001 => no Drand pulse => remains in storage + let plaintext_5 = b"Still waiting for next round!"; + let good_ct_5 = produce_ciphertext(plaintext_5, no_pulse_round); + let tle_no_pulse = Data::TimelockEncrypted { + encrypted: good_ct_5, + reveal_round: no_pulse_round, + }; + + // + // 2) Assemble them all in one CommitmentInfo + // + let fields = vec![ + tle_bad_sig, // #1 + tle_corrupted, // #2 + tle_empty_plaintext, // #3 + tle_good, // #4 + tle_no_pulse, // #5 + ]; + let fields_bounded = BoundedVec::try_from(fields).expect("Should not exceed MaxFields"); + let info = CommitmentInfo { + fields: fields_bounded, + }; + + // + // 3) Insert the commitment + // + let who = 123; + let netuid = 777; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RawOrigin::Signed(who).into(), + netuid, + Box::new(info) + )); + + // + // 4) Insert pulses: + // - Round=999 => invalid signature => attempts to parse => fails => remove TLE #1 + // - Round=1000 => valid signature => TLE #2 is corrupted => 
remove; #3 empty => remove; #4 reveals successfully + // - Round=2001 => no signature => TLE #5 remains + // + let bad_sig = [0x33u8; 10]; // obviously invalid for TinyBLS + insert_drand_pulse(invalid_sig_round, &bad_sig); + + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(valid_round, &drand_sig_1000); + + // + // 5) Call reveal => “bad” items are removed, “good” is revealed, “not ready” remains + // + System::::set_block_number(2); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // + // 6) Check final storage + // + // (a) TLE #5 => still in fields => same user remains in CommitmentOf => TimelockedIndex includes them + let registration_after = + CommitmentOf::::get(netuid, who).expect("Should still exist"); + assert_eq!( + registration_after.info.fields.len(), + 1, + "Only the unrevealed TLE #5 should remain" + ); + let leftover = ®istration_after.info.fields[0]; + match leftover { + Data::TimelockEncrypted { reveal_round, .. } => { + assert_eq!(*reveal_round, no_pulse_round, "Should be TLE #5 leftover"); + } + _ => panic!("Expected the leftover field to be TLE #5"), + }; + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "Still in index because there's one remaining timelock (#5)." 
+ ); + + // (b) TLE #4 => revealed => check that the plaintext matches + let revealed = RevealedCommitments::::get(netuid, who) + .expect("Should have at least one revealed item for TLE #4"); + let (revealed_bytes, reveal_block) = &revealed[0]; + assert_eq!(*reveal_block, 2, "Revealed at block #2"); + + let revealed_str = sp_std::str::from_utf8(revealed_bytes) + .expect("Truncated bytes should be valid UTF-8 in this test"); + + let original_str = + sp_std::str::from_utf8(plaintext_4).expect("plaintext_4 should be valid UTF-8"); + + assert_eq!( + revealed_str, original_str, + "Expected revealed data to match the original plaintext" + ); + + // (c) TLE #1 / #2 / #3 => removed => do NOT appear in leftover fields, nor in revealed (they were invalid) + assert_eq!(revealed.len(), 1, "Only TLE #4 ended up in revealed list"); + }); +} + +#[test] +fn revealed_commitments_keeps_only_10_items() { + new_test_ext().execute_with(|| { + let netuid = 1; + let who = 2; + let reveal_round = 1000; + + let drand_sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Should decode DRAND sig"); + insert_drand_pulse(reveal_round, &drand_sig_bytes); + + // --- 1) Build 12 TimelockEncrypted fields --- + // Each one has a unique plaintext "TLE #i" + const TOTAL_TLES: usize = 12; + let mut fields = Vec::with_capacity(TOTAL_TLES); + + for i in 0..TOTAL_TLES { + let plaintext = format!("TLE #{}", i).into_bytes(); + let ciphertext = produce_ciphertext(&plaintext, reveal_round); + let timelock = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round, + }; + fields.push(timelock); + } + let fields_bounded = BoundedVec::try_from(fields).expect("Should not exceed MaxFields"); + let info = CommitmentInfo { + fields: fields_bounded, + }; + + // --- 2) Set the commitment => 12 timelocks in storage --- + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info) + )); + + // --- 3) Reveal => all 12 are decrypted in one shot 
--- + System::::set_block_number(2); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // --- 4) Check we only keep 10 in `RevealedCommitments` --- + let revealed = RevealedCommitments::::get(netuid, who) + .expect("Should have at least some revealed data"); + assert_eq!( + revealed.len(), + 10, + "We must only keep the newest 10, out of 12 total" + ); + + // The oldest 2 ("TLE #0" and "TLE #1") must be dropped. + // The items in `revealed` now correspond to "TLE #2" .. "TLE #11". + for (idx, (revealed_bytes, reveal_block)) in revealed.iter().enumerate() { + // Convert to UTF-8 + let revealed_str = sp_std::str::from_utf8(revealed_bytes) + .expect("Decrypted data should be valid UTF-8 for this test case"); + + // We expect them to be TLE #2..TLE #11 + let expected_index = idx + 2; // since we dropped #0 and #1 + let expected_str = format!("TLE #{}", expected_index); + assert_eq!(revealed_str, expected_str, "Check which TLE is kept"); + + // Also check it was revealed at block 2 + assert_eq!(*reveal_block, 2, "All reveal in the same block #2"); + } + }); +} + +#[test] +fn revealed_commitments_keeps_only_10_newest_with_individual_single_field_commits() { + new_test_ext().execute_with(|| { + let netuid = 1; + let who = 2; + let reveal_round = 1000; + + let drand_sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode DRAND sig"); + insert_drand_pulse(reveal_round, &drand_sig_bytes); + + // We will add 12 separate timelocks, one per iteration, each in its own set_commitment call. + // After each insertion, we call reveal + increment the block by 1. 
+ + for i in 0..12 { + System::::set_block_number(i as u64 + 1); + + let plaintext = format!("TLE #{}", i).into_bytes(); + let ciphertext = produce_ciphertext(&plaintext, reveal_round); + + let new_timelock = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round, + }; + + let fields = BoundedVec::try_from(vec![new_timelock]) + .expect("Single field is well within MaxFields"); + let info = CommitmentInfo { fields }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info) + )); + + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let revealed = RevealedCommitments::::get(netuid, who).unwrap_or_default(); + let expected_count = (i + 1).min(10); + assert_eq!( + revealed.len(), + expected_count, + "At iteration {}, we keep at most 10 reveals", + i + ); + } + + let revealed = + RevealedCommitments::::get(netuid, who).expect("expected to not panic"); + assert_eq!( + revealed.len(), + 10, + "After 12 total commits, only 10 remain revealed" + ); + + // Check that TLE #0 and TLE #1 are dropped; TLE #2..#11 remain in ascending order. + for (idx, (revealed_bytes, reveal_block)) in revealed.iter().enumerate() { + let revealed_str = + sp_std::str::from_utf8(revealed_bytes).expect("Should be valid UTF-8"); + let expected_i = idx + 2; // i=0 => "TLE #2", i=1 => "TLE #3", etc. 
+ let expected_str = format!("TLE #{}", expected_i); + + assert_eq!( + revealed_str, expected_str, + "Revealed data #{} should match the truncated TLE #{}", + idx, expected_i + ); + + let expected_reveal_block = expected_i as u64 + 1; + assert_eq!( + *reveal_block, expected_reveal_block, + "Check which block TLE #{} was revealed in", + expected_i + ); + } + }); +} + +#[test] +fn usage_respects_minimum_of_100_bytes() { + new_test_ext().execute_with(|| { + MaxSpace::::set(1000); + + let netuid = 1; + let who = 99; + + System::::set_block_number(1); + + let small_data = Data::Raw(vec![0u8; 50].try_into().expect("<=128 bytes for Raw")); + let info_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![small_data]).expect("Must not exceed MaxFields"), + }); + + let usage_before = UsedSpaceOf::::get(netuid, who).unwrap_or_default(); + assert_eq!(usage_before.used_space, 0); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info_small + )); + + let usage_after_small = + UsedSpaceOf::::get(netuid, who).expect("expected to not panic"); + assert_eq!( + usage_after_small.used_space, 100, + "Usage must jump to 100 even though we only used 50 bytes" + ); + + let big_data = Data::Raw(vec![0u8; 110].try_into().expect("<=128 bytes for Raw")); + let info_big = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![big_data]).expect("Must not exceed MaxFields"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info_big + )); + + let usage_after_big = UsedSpaceOf::::get(netuid, who).expect("expected to not panic"); + assert_eq!( + usage_after_big.used_space, 210, + "Usage should be 100 + 110 = 210 in this epoch" + ); + + UsedSpaceOf::::remove(netuid, who); + let usage_after_wipe = UsedSpaceOf::::get(netuid, who); + assert!( + usage_after_wipe.is_none(), + "Expected `UsedSpaceOf` entry to be removed" + ); + + let bigger_data = Data::Raw(vec![0u8; 120].try_into().expect("<=128 bytes for 
Raw")); + let info_bigger = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![bigger_data]).expect("Must not exceed MaxFields"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info_bigger + )); + + let usage_after_reset = + UsedSpaceOf::::get(netuid, who).expect("expected to not panic"); + assert_eq!( + usage_after_reset.used_space, 120, + "After wiping old usage, the new usage should be exactly 120" + ); + }); +} diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index bc0531ece4..0f1d2302a5 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -53,12 +53,34 @@ pub enum Data { /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. ShaThree256([u8; 32]), + /// A timelock-encrypted commitment with a reveal round. + TimelockEncrypted { + encrypted: BoundedVec>, + reveal_round: u64, + }, } impl Data { pub fn is_none(&self) -> bool { self == &Data::None } + + /// Check if this is a timelock-encrypted commitment. + pub fn is_timelock_encrypted(&self) -> bool { + matches!(self, Data::TimelockEncrypted { .. }) + } + + pub fn len_for_rate_limit(&self) -> u64 { + match self { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u64, + Data::BlakeTwo256(arr) + | Data::Sha256(arr) + | Data::Keccak256(arr) + | Data::ShaThree256(arr) => arr.len() as u64, + Data::TimelockEncrypted { encrypted, .. 
} => encrypted.len() as u64, + } + } } impl Decode for Data { @@ -77,6 +99,15 @@ impl Decode for Data { 131 => Data::Sha256(<[u8; 32]>::decode(input)?), 132 => Data::Keccak256(<[u8; 32]>::decode(input)?), 133 => Data::ShaThree256(<[u8; 32]>::decode(input)?), + 134 => { + let encrypted = + BoundedVec::>::decode(input)?; + let reveal_round = u64::decode(input)?; + Data::TimelockEncrypted { + encrypted, + reveal_round, + } + } _ => return Err(codec::Error::from("invalid leading byte")), }) } @@ -96,6 +127,15 @@ impl Encode for Data { Data::Sha256(h) => once(131).chain(h.iter().cloned()).collect(), Data::Keccak256(h) => once(132).chain(h.iter().cloned()).collect(), Data::ShaThree256(h) => once(133).chain(h.iter().cloned()).collect(), + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let mut r = vec![134]; + r.extend_from_slice(&encrypted.encode()); + r.extend_from_slice(&reveal_round.encode()); + r + } } } } @@ -270,6 +310,17 @@ impl TypeInfo for Data { .variant("ShaThree256", |v| { v.index(133) .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("TimelockEncrypted", |v| { + v.index(134).fields( + Fields::named() + .field(|f| { + f.name("encrypted") + .ty::>>( + ) + }) + .field(|f| f.name("reveal_round").ty::()), + ) }); Type::builder() @@ -295,6 +346,28 @@ pub struct CommitmentInfo> { pub fields: BoundedVec, } +/// Maximum size of the serialized timelock commitment in bytes +pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; + +/// Contains the decrypted data of a revealed commitment. +#[freeze_struct("bf575857b57f9bef")] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] +pub struct RevealedData, BlockNumber> { + pub info: CommitmentInfo, + pub revealed_block: BlockNumber, + pub deposit: Balance, +} + +/// Tracks how much “space” each (netuid, who) has used within the current RateLimit block-window. 
+#[freeze_struct("1f23fb50f96326e4")] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, TypeInfo)] +pub struct UsageTracker { + /// Last epoch block + pub last_epoch: u64, + /// Space used + pub used_space: u64, +} + /// Information concerning the identity of the controller of an account. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a @@ -345,71 +418,3 @@ impl< }) } } - -#[cfg(test)] -#[allow(clippy::indexing_slicing, clippy::unwrap_used)] -mod tests { - use super::*; - - #[test] - fn manual_data_type_info() { - let mut registry = scale_info::Registry::new(); - let type_id = registry.register_type(&scale_info::meta_type::()); - let registry: scale_info::PortableRegistry = registry.into(); - let type_info = registry.resolve(type_id.id).unwrap(); - - let check_type_info = |data: &Data| { - let variant_name = match data { - Data::None => "None".to_string(), - Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), - Data::Sha256(_) => "Sha256".to_string(), - Data::Keccak256(_) => "Keccak256".to_string(), - Data::ShaThree256(_) => "ShaThree256".to_string(), - Data::Raw(bytes) => format!("Raw{}", bytes.len()), - }; - if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { - let variant = variant - .variants - .iter() - .find(|v| v.name == variant_name) - .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); - - let field_arr_len = variant - .fields - .first() - .and_then(|f| registry.resolve(f.ty.id)) - .map(|ty| { - if let scale_info::TypeDef::Array(arr) = &ty.type_def { - arr.len - } else { - panic!("Should be an array type") - } - }) - .unwrap_or(0); - - let encoded = data.encode(); - assert_eq!(encoded[0], variant.index); - assert_eq!(encoded.len() as u32 - 1, field_arr_len); - } else { - panic!("Should be a variant type") - }; - }; - - let mut data = vec![ - Data::None, - Data::BlakeTwo256(Default::default()), - Data::Sha256(Default::default()), - 
Data::Keccak256(Default::default()), - Data::ShaThree256(Default::default()), - ]; - - // A Raw instance for all possible sizes of the Raw data - for n in 0..128 { - data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())) - } - - for d in data.iter() { - check_type_info(d); - } - } -} diff --git a/pallets/proxy/Cargo.toml b/pallets/proxy/Cargo.toml new file mode 100644 index 0000000000..f3a97dfedf --- /dev/null +++ b/pallets/proxy/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pallet-proxy" +version = "38.0.0" +authors = ["Bittensor Nucleus Team"] +edition = "2021" +license = "Apache-2.0" +homepage = "https://bittensor.com" +description = "FRAME proxying pallet" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support.workspace = true +frame-system.workspace = true +sp-io.workspace = true +sp-runtime.workspace = true +subtensor-macros.workspace = true + +[dev-dependencies] +pallet-balances = { default-features = true, workspace = true } +pallet-utility = { default-features = true, workspace = true } +sp-core = { default-features = true, workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-io/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-utility/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", + "pallet-balances/try-runtime", + "pallet-utility/try-runtime" +] diff --git 
a/pallets/proxy/README.md b/pallets/proxy/README.md new file mode 100644 index 0000000000..290c49c050 --- /dev/null +++ b/pallets/proxy/README.md @@ -0,0 +1,26 @@ +# Proxy Module +A module allowing accounts to give permission to other accounts to dispatch types of calls from +their signed origin. + +The accounts to which permission is delegated may be required to announce the action that they +wish to execute some duration prior to execution happens. In this case, the target account may +reject the announcement and in doing so, veto the execution. + +- [`Config`](https://docs.rs/pallet-proxy/latest/pallet_proxy/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-proxy/latest/pallet_proxy/pallet/enum.Call.html) + +## Overview + +## Interface + +### Dispatchable Functions + +[`Call`]: ./enum.Call.html +[`Config`]: ./trait.Config.html + +License: Apache-2.0 + + +## Release + +Polkadot SDK stable2409 diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs new file mode 100644 index 0000000000..f519c0f0c3 --- /dev/null +++ b/pallets/proxy/src/benchmarking.rs @@ -0,0 +1,261 @@ +// This file is part of Substrate. +// +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0/ +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Benchmarks for Proxy Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as Proxy; +use alloc::{boxed::Box, vec}; +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use sp_runtime::traits::{Bounded, CheckedDiv}; + +const SEED: u32 = 0; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +fn half_max_balance() -> BalanceOf { + BalanceOf::::max_value() + .checked_div(&BalanceOf::::from(2_u32)) + .unwrap_or_else(BalanceOf::::max_value) +} + +fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { + let caller = maybe_who.unwrap_or_else(whitelisted_caller); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + for i in 0..n { + let real = T::Lookup::unlookup(account("target", i, SEED)); + + Proxy::::add_proxy( + RawOrigin::Signed(caller.clone()).into(), + real, + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + } + Ok(()) +} + +fn add_announcements( + n: u32, + maybe_who: Option, + maybe_real: Option, +) -> Result<(), &'static str> { + let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + let real = if let Some(real) = maybe_real { + real + } else { + let real = account("real", 0, SEED); + T::Currency::make_free_balance_be(&real, half_max_balance::()); + Proxy::::add_proxy( + RawOrigin::Signed(real.clone()).into(), + caller_lookup, + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + real + }; + let real_lookup = T::Lookup::unlookup(real); + for _ in 0..n { + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real_lookup.clone(), + T::CallHasher::hash_of(&("add_announcement", n)), + )?; + } + Ok(()) +} + +benchmarks! { + proxy { + let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + } + + proxy_announced { + let a in 0 .. T::MaxPending::get().saturating_sub(1); + let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = account("pure", 0, SEED); + let delegate: T::AccountId = account("target", p.saturating_sub(1), SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + T::Currency::make_free_balance_be(&delegate, half_max_balance::()); + let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + Proxy::::announce( + RawOrigin::Signed(delegate.clone()).into(), + real_lookup.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(delegate.clone()), None)?; + }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + } + + remove_announcement { + let a in 0 .. T::MaxPending::get().saturating_sub(1); + let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real_lookup.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + reject_announcement { + let a in 0 .. T::MaxPending::get().saturating_sub(1); + let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real_lookup, + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + announce { + let a in 0 .. T::MaxPending::get().saturating_sub(1); + let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + T::Currency::make_free_balance_be(&caller, half_max_balance::()); + let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); + add_announcements::(a, Some(caller.clone()), None)?; + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call_hash = T::CallHasher::hash_of(&call); + }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) + verify { + assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); + } + + add_proxy { + let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = whitelisted_caller(); + let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); + }: _( + RawOrigin::Signed(caller.clone()), + real, + T::ProxyType::default(), + BlockNumberFor::::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p.saturating_add(1)); + } + + remove_proxy { + let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = whitelisted_caller(); + let delegate = T::Lookup::unlookup(account("target", 0, SEED)); + }: _( + RawOrigin::Signed(caller.clone()), + delegate, + T::ProxyType::default(), + BlockNumberFor::::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p.saturating_sub(1)); + } + + remove_proxies { + let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller.clone())) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, 0); + } + + create_pure { + let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + let caller: T::AccountId = whitelisted_caller(); + }: _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + 0 + ) + verify { + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(Event::PureCreated { + pure: pure_account, + who: caller, + proxy_type: T::ProxyType::default(), + disambiguation_index: 0, + }.into()); + } + + kill_pure { + let p in 0 .. (T::MaxProxies::get().saturating_sub(2)); + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + Pallet::::create_pure( + RawOrigin::Signed(whitelisted_caller()).into(), + T::ProxyType::default(), + BlockNumberFor::::zero(), + 0 + )?; + let height = system::Pallet::::block_number(); + let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + + add_proxies::(p, Some(pure_account.clone()))?; + ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); + }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) + verify { + assert!(!Proxies::::contains_key(&pure_account)); + } + + impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs new file mode 100644 index 0000000000..3f45951190 --- /dev/null +++ b/pallets/proxy/src/lib.rs @@ -0,0 +1,891 @@ +// This file is part of Substrate. +// +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0/ +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Proxy Pallet +//! A pallet allowing accounts to give permission to other accounts to dispatch types of calls from +//! their signed origin. +//! +//! The accounts to which permission is delegated may be required to announce the action that they +//! wish to execute some duration prior to execution happens. In this case, the target account may +//! reject the announcement and in doing so, veto the execution. +//! +//! - [`Config`] +//! - [`Call`] + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod tests; +pub mod weights; + +extern crate alloc; + +use alloc::{boxed::Box, vec}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::pallet_prelude::{Pays, Weight}; +use frame_support::{ + dispatch::GetDispatchInfo, + ensure, + traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, + BoundedVec, +}; +use frame_system::{self as system, ensure_signed, pallet_prelude::BlockNumberFor}; +pub use pallet::*; +use scale_info::{prelude::cmp::Ordering, TypeInfo}; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, + DispatchError, DispatchResult, RuntimeDebug, +}; +use subtensor_macros::freeze_struct; +pub use weights::WeightInfo; + +type CallHashOf = <::CallHasher as Hash>::Output; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +/// The parameters under which a 
particular account has a proxy relationship with some other +/// account. +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, +)] +#[freeze_struct("a37bb67fe5520678")] +pub struct ProxyDefinition { + /// The account which may act on behalf of another. + pub delegate: AccountId, + /// A value defining the subset of calls that it is allowed to make. + pub proxy_type: ProxyType, + /// The number of blocks that an announcement must be in place for before the corresponding + /// call may be dispatched. If zero, then no announcement is needed. + pub delay: BlockNumber, +} + +/// Details surrounding a specific instance of an announcement to make a call. +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +#[freeze_struct("4c1b5c8c3bc489ad")] +pub struct Announcement { + /// The account which made the announcement. + real: AccountId, + /// The hash of the call to be made. + call_hash: Hash, + /// The height at which the announcement was made. + height: BlockNumber, +} + +#[frame_support::pallet] +pub mod pallet { + use super::{DispatchResult, *}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The overarching call type. + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + IsSubType> + + IsType<::RuntimeCall>; + + /// The currency mechanism. + type Currency: ReservableCurrency; + + /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` filter. + /// The instance filter determines whether a given call may be proxied under this type. + /// + /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. 
+ type ProxyType: Parameter + + Member + + Ord + + PartialOrd + + InstanceFilter<::RuntimeCall> + + Default + + MaxEncodedLen; + + /// The base amount of currency needed to reserve for creating a proxy. + /// + /// This is held for an additional storage item whose value size is + /// `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes. + #[pallet::constant] + type ProxyDepositBase: Get>; + + /// The amount of currency needed per proxy added. + /// + /// This is held for adding 32 bytes plus an instance of `ProxyType` more into a + /// pre-existing storage value. Thus, when configuring `ProxyDepositFactor` one should take + /// into account `32 + proxy_type.encode().len()` bytes of data. + #[pallet::constant] + type ProxyDepositFactor: Get>; + + /// The maximum amount of proxies allowed for a single account. + #[pallet::constant] + type MaxProxies: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// The maximum amount of time-delayed announcements that are allowed to be pending. + #[pallet::constant] + type MaxPending: Get; + + /// The type of hash used for hashing the call. + type CallHasher: Hash; + + /// The base amount of currency needed to reserve for creating an announcement. + /// + /// This is held when a new storage item holding a `Balance` is created (typically 16 + /// bytes). + #[pallet::constant] + type AnnouncementDepositBase: Get>; + + /// The amount of currency needed per announcement made. + /// + /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) + /// into a pre-existing storage value. + #[pallet::constant] + type AnnouncementDepositFactor: Get>; + } + + #[pallet::call] + impl Pallet { + /// Dispatch the given `call` from an account that the sender is authorised for through + /// `add_proxy`. + /// + /// The dispatch origin for this call must be _Signed_. 
+ /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. + /// - `call`: The call to be made by the `real` account. + #[pallet::call_index(0)] + #[pallet::weight({ + let di = call.get_dispatch_info(); + let inner_call_weight = match di.pays_fee { + Pays::Yes => di.weight, + Pays::No => Weight::zero(), + }; + let base_weight = T::WeightInfo::proxy(T::MaxProxies::get()) + .saturating_add(T::DbWeight::get().reads_writes(1, 1)); + (base_weight.saturating_add(inner_call_weight), di.class) + })] + pub fn proxy( + origin: OriginFor, + real: AccountIdLookupOf, + force_proxy_type: Option, + call: Box<::RuntimeCall>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; + let def = Self::find_proxy(&real, &who, force_proxy_type)?; + ensure!(def.delay.is_zero(), Error::::Unannounced); + + Self::do_proxy(def, real, *call); + + Ok(()) + } + + /// Register a proxy account for the sender that is able to make calls on its behalf. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `proxy`: The account that the `caller` would like to make a proxy. + /// - `proxy_type`: The permissions allowed for this proxy account. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] + pub fn add_proxy( + origin: OriginFor, + delegate: AccountIdLookupOf, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::add_proxy_delegate(&who, delegate, proxy_type, delay) + } + + /// Unregister a proxy account for the sender. + /// + /// The dispatch origin for this call must be _Signed_. 
+ /// + /// Parameters: + /// - `proxy`: The account that the `caller` would like to remove as a proxy. + /// - `proxy_type`: The permissions currently enabled for the removed proxy account. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] + pub fn remove_proxy( + origin: OriginFor, + delegate: AccountIdLookupOf, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::remove_proxy_delegate(&who, delegate, proxy_type, delay) + } + + /// Unregister all proxy accounts for the sender. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// WARNING: This may be called on accounts created by `pure`, however if done, then + /// the unreserved fees will be inaccessible. **All access to this account will be lost.** + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get()))] + pub fn remove_proxies(origin: OriginFor) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::remove_all_proxy_delegates(&who); + Ok(()) + } + + /// Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and + /// initialize it with a proxy of `proxy_type` for `origin` sender. + /// + /// Requires a `Signed` origin. + /// + /// - `proxy_type`: The type of the proxy that the sender will be registered as over the + /// new account. This will almost always be the most permissive `ProxyType` possible to + /// allow for maximum flexibility. + /// - `index`: A disambiguation index, in case this is called multiple times in the same + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. 
+ /// + /// Fails with `Duplicate` if this has already been called in this transaction, from the + /// same sender, with the same parameters. + /// + /// Fails if there are insufficient funds to pay for deposit. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::create_pure(T::MaxProxies::get()))] + pub fn create_pure( + origin: OriginFor, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + index: u16, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let pure = Self::pure_account(&who, &proxy_type, index, None); + ensure!(!Proxies::::contains_key(&pure), Error::::Duplicate); + + let proxy_def = ProxyDefinition { + delegate: who.clone(), + proxy_type: proxy_type.clone(), + delay, + }; + let bounded_proxies: BoundedVec<_, T::MaxProxies> = vec![proxy_def] + .try_into() + .map_err(|_| Error::::TooMany)?; + + let deposit = T::ProxyDepositBase::get().saturating_add(T::ProxyDepositFactor::get()); + T::Currency::reserve(&who, deposit)?; + + Proxies::::insert(&pure, (bounded_proxies, deposit)); + Self::deposit_event(Event::PureCreated { + pure, + who, + proxy_type, + disambiguation_index: index, + }); + + Ok(()) + } + + /// Removes a previously spawned pure proxy. + /// + /// WARNING: **All access to this account will be lost.** Any funds held in it will be + /// inaccessible. + /// + /// Requires a `Signed` origin, and the sender account must have been created by a call to + /// `pure` with corresponding parameters. + /// + /// - `spawner`: The account that originally called `pure` to create this account. + /// - `index`: The disambiguation index originally passed to `pure`. Probably `0`. + /// - `proxy_type`: The proxy type originally passed to `pure`. + /// - `height`: The height of the chain when the call to `pure` was processed. + /// - `ext_index`: The extrinsic index in which the call to `pure` was processed. 
+ /// + /// Fails with `NoPermission` in case the caller is not a previously created pure + /// account whose `pure` call has corresponding parameters. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::kill_pure(T::MaxProxies::get()))] + pub fn kill_pure( + origin: OriginFor, + spawner: AccountIdLookupOf, + proxy_type: T::ProxyType, + index: u16, + #[pallet::compact] height: BlockNumberFor, + #[pallet::compact] ext_index: u32, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let spawner = T::Lookup::lookup(spawner)?; + + let when = (height, ext_index); + let proxy = Self::pure_account(&spawner, &proxy_type, index, Some(when)); + ensure!(proxy == who, Error::::NoPermission); + + let (_, deposit) = Proxies::::take(&who); + T::Currency::unreserve(&spawner, deposit); + + Ok(()) + } + + /// Publish the hash of a proxy-call that will be made in the future. + /// + /// This must be called some number of blocks before the corresponding `proxy` is attempted + /// if the delay associated with the proxy relationship is greater than zero. + /// + /// No more than `MaxPending` announcements may be made at any one time. + /// + /// This will take a deposit of `AnnouncementDepositFactor` as well as + /// `AnnouncementDepositBase` if there are no other pending announcements. + /// + /// The dispatch origin for this call must be _Signed_ and a proxy of `real`. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. 
+ #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] + pub fn announce( + origin: OriginFor, + real: AccountIdLookupOf, + call_hash: CallHashOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; + Proxies::::get(&real) + .0 + .into_iter() + .find(|x| x.delegate == who) + .ok_or(Error::::NotProxy)?; + + let announcement = Announcement { + real: real.clone(), + call_hash, + height: system::Pallet::::block_number(), + }; + + Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { + pending + .try_push(announcement) + .map_err(|_| Error::::TooMany)?; + Self::rejig_deposit( + &who, + *deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + ) + .map(|d| { + d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed") + }) + .map(|d| *deposit = d) + })?; + Self::deposit_event(Event::Announced { + real, + proxy: who, + call_hash, + }); + + Ok(()) + } + + /// Remove a given announcement. + /// + /// May be called by a proxy account to remove a call they previously announced and return + /// the deposit. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::remove_announcement( + T::MaxPending::get(), + T::MaxProxies::get() + ))] + pub fn remove_announcement( + origin: OriginFor, + real: AccountIdLookupOf, + call_hash: CallHashOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; + Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; + + Ok(()) + } + + /// Remove the given announcement of a delegate. 
+ /// + /// May be called by a target (proxied) account to remove a call that one of their delegates + /// (`delegate`) has announced they want to execute. The deposit is returned. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `delegate`: The account that previously announced the call. + /// - `call_hash`: The hash of the call to be made. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::reject_announcement( + T::MaxPending::get(), + T::MaxProxies::get() + ))] + pub fn reject_announcement( + origin: OriginFor, + delegate: AccountIdLookupOf, + call_hash: CallHashOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::edit_announcements(&delegate, |ann| { + ann.real != who || ann.call_hash != call_hash + })?; + + Ok(()) + } + + /// Dispatch the given `call` from an account that the sender is authorized for through + /// `add_proxy`. + /// + /// Removes any corresponding announcement(s). + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. + /// - `call`: The call to be made by the `real` account. + #[pallet::call_index(9)] + #[pallet::weight({ + let di = call.get_dispatch_info(); + (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) + // AccountData for inner call origin accountdata. 
+ .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.weight), + di.class) + })] + pub fn proxy_announced( + origin: OriginFor, + delegate: AccountIdLookupOf, + real: AccountIdLookupOf, + force_proxy_type: Option, + call: Box<::RuntimeCall>, + ) -> DispatchResult { + ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + let real = T::Lookup::lookup(real)?; + let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; + + let call_hash = T::CallHasher::hash_of(&call); + let now = system::Pallet::::block_number(); + Self::edit_announcements(&delegate, |ann| { + ann.real != real + || ann.call_hash != call_hash + || now.saturating_sub(ann.height) < def.delay + }) + .map_err(|_| Error::::Unannounced)?; + + Self::do_proxy(def, real, *call); + + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A proxy was executed correctly, with the given. + ProxyExecuted { result: DispatchResult }, + /// A pure account has been created by new proxy with given + /// disambiguation index and proxy type. + PureCreated { + pure: T::AccountId, + who: T::AccountId, + proxy_type: T::ProxyType, + disambiguation_index: u16, + }, + /// An announcement was placed to make a call in the future. + Announced { + real: T::AccountId, + proxy: T::AccountId, + call_hash: CallHashOf, + }, + /// A proxy was added. + ProxyAdded { + delegator: T::AccountId, + delegatee: T::AccountId, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + }, + /// A proxy was removed. + ProxyRemoved { + delegator: T::AccountId, + delegatee: T::AccountId, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + }, + } + + #[pallet::error] + pub enum Error { + /// There are too many proxies registered or too many announcements pending. + TooMany, + /// Proxy registration not found. + NotFound, + /// Sender is not a proxy of the account to be proxied. 
+ NotProxy, + /// A call which is incompatible with the proxy type's filter was attempted. + Unproxyable, + /// Account is already a proxy. + Duplicate, + /// Call may not be made by proxy because it may escalate its privileges. + NoPermission, + /// Announcement, if made at all, was made too recently. + Unannounced, + /// Cannot add self as proxy. + NoSelfProxy, + } + + /// The set of account proxies. Maps the account which has delegated to the accounts + /// which are being delegated to, together with the amount held on deposit. + #[pallet::storage] + pub type Proxies = StorageMap< + _, + Twox64Concat, + T::AccountId, + ( + BoundedVec< + ProxyDefinition>, + T::MaxProxies, + >, + BalanceOf, + ), + ValueQuery, + >; + + /// The announcements made by the proxy (key). + #[pallet::storage] + pub type Announcements = StorageMap< + _, + Twox64Concat, + T::AccountId, + ( + BoundedVec, BlockNumberFor>, T::MaxPending>, + BalanceOf, + ), + ValueQuery, + >; +} + +impl Pallet { + /// Public function to proxies storage. + pub fn proxies( + account: T::AccountId, + ) -> ( + BoundedVec>, T::MaxProxies>, + BalanceOf, + ) { + Proxies::::get(account) + } + + /// Public function to announcements storage. + pub fn announcements( + account: T::AccountId, + ) -> ( + BoundedVec, BlockNumberFor>, T::MaxPending>, + BalanceOf, + ) { + Announcements::::get(account) + } + + /// Calculate the address of an pure account. + /// + /// - `who`: The spawner account. + /// - `proxy_type`: The type of the proxy that the sender will be registered as over the + /// new account. This will almost always be the most permissive `ProxyType` possible to + /// allow for maximum flexibility. + /// - `index`: A disambiguation index, in case this is called multiple times in the same + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. + /// - `maybe_when`: The block height and extrinsic index of when the pure account was + /// created. 
None to use current block height and extrinsic index. + pub fn pure_account( + who: &T::AccountId, + proxy_type: &T::ProxyType, + index: u16, + maybe_when: Option<(BlockNumberFor, u32)>, + ) -> T::AccountId { + let (height, ext_index) = maybe_when.unwrap_or_else(|| { + ( + system::Pallet::::block_number(), + system::Pallet::::extrinsic_index().unwrap_or_default(), + ) + }); + let entropy = ( + b"modlpy/proxy____", + who, + height, + ext_index, + proxy_type, + index, + ) + .using_encoded(blake2_256); + Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .expect("infinite length input; no invalid inputs for type; qed") + } + + /// Register a proxy account for the delegator that is able to make calls on its behalf. + /// + /// Parameters: + /// - `delegator`: The delegator account. + /// - `delegatee`: The account that the `delegator` would like to make a proxy. + /// - `proxy_type`: The permissions allowed for this proxy account. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. 
+ pub fn add_proxy_delegate( + delegator: &T::AccountId, + delegatee: T::AccountId, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + ) -> DispatchResult { + ensure!(delegator != &delegatee, Error::::NoSelfProxy); + Proxies::::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| { + let proxy_def = ProxyDefinition { + delegate: delegatee.clone(), + proxy_type: proxy_type.clone(), + delay, + }; + let i = proxies + .binary_search(&proxy_def) + .err() + .ok_or(Error::::Duplicate)?; + proxies + .try_insert(i, proxy_def) + .map_err(|_| Error::::TooMany)?; + let new_deposit = Self::deposit(proxies.len() as u32); + match new_deposit.cmp(deposit) { + Ordering::Greater => { + T::Currency::reserve(delegator, new_deposit.saturating_sub(*deposit))?; + } + Ordering::Less => { + T::Currency::unreserve(delegator, deposit.saturating_sub(new_deposit)); + } + Ordering::Equal => (), + } + *deposit = new_deposit; + Self::deposit_event(Event::::ProxyAdded { + delegator: delegator.clone(), + delegatee, + proxy_type, + delay, + }); + Ok(()) + }) + } + + /// Unregister a proxy account for the delegator. + /// + /// Parameters: + /// - `delegator`: The delegator account. + /// - `delegatee`: The account that the `delegator` would like to make a proxy. + /// - `proxy_type`: The permissions allowed for this proxy account. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. 
+ pub fn remove_proxy_delegate( + delegator: &T::AccountId, + delegatee: T::AccountId, + proxy_type: T::ProxyType, + delay: BlockNumberFor, + ) -> DispatchResult { + Proxies::::try_mutate_exists(delegator, |x| { + let (mut proxies, old_deposit) = x.take().ok_or(Error::::NotFound)?; + let proxy_def = ProxyDefinition { + delegate: delegatee.clone(), + proxy_type: proxy_type.clone(), + delay, + }; + let i = proxies + .binary_search(&proxy_def) + .ok() + .ok_or(Error::::NotFound)?; + proxies.remove(i); + let new_deposit = Self::deposit(proxies.len() as u32); + match new_deposit.cmp(&old_deposit) { + Ordering::Greater => { + T::Currency::reserve(delegator, new_deposit.saturating_sub(old_deposit))?; + } + Ordering::Less => { + T::Currency::unreserve(delegator, old_deposit.saturating_sub(new_deposit)); + } + Ordering::Equal => (), + } + if !proxies.is_empty() { + *x = Some((proxies, new_deposit)) + } + Self::deposit_event(Event::::ProxyRemoved { + delegator: delegator.clone(), + delegatee, + proxy_type, + delay, + }); + Ok(()) + }) + } + + pub fn deposit(num_proxies: u32) -> BalanceOf { + if num_proxies == 0 { + Zero::zero() + } else { + T::ProxyDepositBase::get() + .saturating_add(T::ProxyDepositFactor::get().saturating_mul(num_proxies.into())) + } + } + + fn rejig_deposit( + who: &T::AccountId, + old_deposit: BalanceOf, + base: BalanceOf, + factor: BalanceOf, + len: usize, + ) -> Result>, DispatchError> { + let new_deposit = if len == 0 { + BalanceOf::::zero() + } else { + base.saturating_add(factor.saturating_mul((len as u32).into())) + }; + match new_deposit.cmp(&old_deposit) { + Ordering::Greater => { + T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; + } + Ordering::Less => { + T::Currency::unreserve(who, old_deposit.saturating_sub(new_deposit)); + } + Ordering::Equal => (), + } + Ok(if len == 0 { None } else { Some(new_deposit) }) + } + + fn edit_announcements< + F: FnMut(&Announcement, BlockNumberFor>) -> bool, + >( + delegate: &T::AccountId, + 
mut f: F, + ) -> DispatchResult { + Announcements::::try_mutate_exists(delegate, |x| { + let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; + let orig_pending_len = pending.len(); + pending.retain(&mut f); + ensure!(orig_pending_len > pending.len(), Error::::NotFound); + *x = Self::rejig_deposit( + delegate, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + )? + .map(|deposit| (pending, deposit)); + Ok(()) + }) + } + + pub fn find_proxy( + real: &T::AccountId, + delegate: &T::AccountId, + force_proxy_type: Option, + ) -> Result>, DispatchError> { + let f = |x: &ProxyDefinition>| -> bool { + &x.delegate == delegate && force_proxy_type.as_ref().is_none_or(|y| &x.proxy_type == y) + }; + Ok(Proxies::::get(real) + .0 + .into_iter() + .find(f) + .ok_or(Error::::NotProxy)?) + } + + fn do_proxy( + def: ProxyDefinition>, + real: T::AccountId, + call: ::RuntimeCall, + ) { + // This is a freshly authenticated new account, the origin restrictions doesn't apply. + let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); + origin.add_filter(move |c: &::RuntimeCall| { + let c = ::RuntimeCall::from_ref(c); + // We make sure the proxy call does access this pallet to change modify proxies. + match c.is_sub_type() { + // Proxy call cannot add or remove a proxy with more permissions than it already + // has. + Some(Call::add_proxy { ref proxy_type, .. }) + | Some(Call::remove_proxy { ref proxy_type, .. }) + if !def.proxy_type.is_superset(proxy_type) => + { + false + } + // Proxy call cannot remove all proxies or kill pure proxies unless it has full + // permissions. + Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. 
}) + if def.proxy_type != T::ProxyType::default() => + { + false + } + _ => def.proxy_type.filter(c), + } + }); + let e = call.dispatch(origin); + Self::deposit_event(Event::ProxyExecuted { + result: e.map(|_| ()).map_err(|e| e.error), + }); + } + + /// Removes all proxy delegates for a given delegator. + /// + /// Parameters: + /// - `delegator`: The delegator account. + pub fn remove_all_proxy_delegates(delegator: &T::AccountId) { + let (_, old_deposit) = Proxies::::take(delegator); + T::Currency::unreserve(delegator, old_deposit); + } +} diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs new file mode 100644 index 0000000000..04bd0bf566 --- /dev/null +++ b/pallets/proxy/src/tests.rs @@ -0,0 +1,965 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Tests for Proxy Pallet + +#![cfg(test)] + +use super::*; + +use crate as proxy; +use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode}; +use frame_support::{ + assert_noop, assert_ok, derive_impl, + traits::{ConstU32, ConstU64, Contains}, +}; +use sp_core::H256; +use sp_runtime::{traits::BlakeTwo256, BuildStorage, DispatchError, RuntimeDebug}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system = 1, + Balances: pallet_balances = 2, + Proxy: proxy = 3, + Utility: pallet_utility = 4, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type BaseCallFilter = BaseFilter; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; +} + +impl pallet_utility::Config for Test { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = (); +} + +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + Any, + JustTransfer, + JustUtility, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::JustTransfer => { + matches!( + c, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) + ) + } + ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. 
}), + } + } + fn is_superset(&self, o: &Self) -> bool { + self == &ProxyType::Any || self == o + } +} +pub struct BaseFilter; +impl Contains for BaseFilter { + fn contains(c: &RuntimeCall) -> bool { + match *c { + // Remark is used as a no-op call in the benchmarking + RuntimeCall::System(SystemCall::remark { .. }) => true, + RuntimeCall::System(_) => false, + _ => true, + } + } +} +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ConstU64<1>; + type ProxyDepositFactor = ConstU64<1>; + type MaxProxies = ConstU32<4>; + type WeightInfo = (); + type CallHasher = BlakeTwo256; + type MaxPending = ConstU32<2>; + type AnnouncementDepositBase = ConstU64<1>; + type AnnouncementDepositFactor = ConstU64<1>; +} + +use super::{Call as ProxyCall, Event as ProxyEvent}; +use frame_system::Call as SystemCall; +use pallet_balances::{Call as BalancesCall, Event as BalancesEvent}; +use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; + +type SystemError = frame_system::Error; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default() + .build_storage() + .expect("Expected to not panic"); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], + } + .assimilate_storage(&mut t) + .expect("Expected to not panic"); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +fn last_events(n: usize) -> Vec { + system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() +} + +fn expect_events(e: Vec) { + assert_eq!(last_events(e.len()), e); +} + +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) +} + +#[test] +fn announcement_works() { + new_test_ext().execute_with(|| { + 
assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + System::assert_last_event( + ProxyEvent::ProxyAdded { + delegator: 1, + delegatee: 3, + proxy_type: ProxyType::Any, + delay: 1, + } + .into(), + ); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + assert_eq!(Balances::reserved_balance(3), 0); + + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![ + Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }, + Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1 + }, + ] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + + assert_noop!( + Proxy::announce(RuntimeOrigin::signed(3), 2, [3; 32].into()), + Error::::TooMany + ); + }); +} + +#[test] +fn remove_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!( + Proxy::remove_announcement(RuntimeOrigin::signed(3), 1, [0; 32].into()), + e + ); + assert_ok!(Proxy::remove_announcement( + RuntimeOrigin::signed(3), + 1, + [1; 32].into() + )); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1 + }] + ); + 
assert_eq!(Balances::reserved_balance(3), announcements.1); + }); +} + +#[test] +fn reject_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!( + Proxy::reject_announcement(RuntimeOrigin::signed(1), 3, [0; 32].into()), + e + ); + let e = Error::::NotFound; + assert_noop!( + Proxy::reject_announcement(RuntimeOrigin::signed(4), 3, [1; 32].into()), + e + ); + assert_ok!(Proxy::reject_announcement( + RuntimeOrigin::signed(1), + 3, + [1; 32].into() + )); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + }); +} + +#[test] +fn announcer_must_be_proxy() { + new_test_ext().execute_with(|| { + assert_noop!( + Proxy::announce(RuntimeOrigin::signed(2), 1, H256::zero()), + Error::::NotProxy + ); + }); +} + +#[test] +fn calling_proxy_doesnt_remove_announcement() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + + let call = Box::new(call_transfer(6, 1)); + let call_hash = BlakeTwo256::hash_of(&call); + + assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call)); + + // The announcement is not removed by calling proxy. 
+ let announcements = Announcements::::get(2); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash, + height: 1 + }] + ); + }); +} + +#[test] +fn delayed_requires_pre_announcement() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 1 + )); + let call = Box::new(call_transfer(6, 1)); + let e = Error::::Unannounced; + assert_noop!( + Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone()), + e + ); + let e = Error::::Unannounced; + assert_noop!( + Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone()), + e + ); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); + system::Pallet::::set_block_number(2); + assert_ok!(Proxy::proxy_announced( + RuntimeOrigin::signed(0), + 2, + 1, + None, + call.clone() + )); + }); +} + +#[test] +fn proxy_announced_removes_announcement_and_returns_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + let call = Box::new(call_transfer(6, 1)); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, call_hash)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, call_hash)); + // Too early to execute announced call + let e = Error::::Unannounced; + assert_noop!( + Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone()), + e + ); + + system::Pallet::::set_block_number(2); + assert_ok!(Proxy::proxy_announced( + RuntimeOrigin::signed(0), + 3, + 1, + None, + call.clone() + )); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 2, + call_hash, + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + }); +} + +#[test] +fn 
filtering_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 1000); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::JustTransfer, + 0 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 4, + ProxyType::JustUtility, + 0 + )); + + let call = Box::new(call_transfer(6, 1)); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(4), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + + let derivative_id = Utility::derivative_account_id(1, 0); + Balances::make_free_balance_be(&derivative_id, 1000); + let inner = Box::new(call_transfer(6, 1)); + + let call = Box::new(RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: inner.clone(), + })); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(4), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + + let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { + calls: vec![*inner], + })); + 
assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + expect_events(vec![ + UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), + ]); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(4), + 1, + None, + call.clone() + )); + expect_events(vec![ + UtilityEvent::BatchInterrupted { + index: 0, + error: SystemError::CallFiltered.into(), + } + .into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), + ]); + + let inner = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_add_proxy( + 5, + ProxyType::Any, + 0, + ))); + let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { + calls: vec![*inner], + })); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + expect_events(vec![ + UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), + ]); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(4), + 1, + None, + call.clone() + )); + expect_events(vec![ + UtilityEvent::BatchInterrupted { + index: 0, + error: SystemError::CallFiltered.into(), + } + .into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), + ]); + + let call = Box::new(RuntimeCall::Proxy(ProxyCall::remove_proxies {})); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(4), + 1, + None, + call.clone() + 
)); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + expect_events(vec![ + BalancesEvent::::Unreserved { who: 1, amount: 5 }.into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), + ]); + }); +} + +#[test] +fn add_remove_proxies_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + assert_noop!( + Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0), + Error::::Duplicate + ); + assert_eq!(Balances::reserved_balance(1), 2); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::JustTransfer, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 3); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 4); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 4, + ProxyType::JustUtility, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 5); + assert_noop!( + Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::Any, 0), + Error::::TooMany + ); + assert_noop!( + Proxy::remove_proxy(RuntimeOrigin::signed(1), 3, ProxyType::JustTransfer, 0), + Error::::NotFound + ); + assert_ok!(Proxy::remove_proxy( + RuntimeOrigin::signed(1), + 4, + ProxyType::JustUtility, + 0 + )); + System::assert_last_event( + ProxyEvent::ProxyRemoved { + delegator: 1, + delegatee: 4, + proxy_type: ProxyType::JustUtility, + delay: 0, + } + .into(), + ); + assert_eq!(Balances::reserved_balance(1), 4); + assert_ok!(Proxy::remove_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 3); + System::assert_last_event( + ProxyEvent::ProxyRemoved { + delegator: 1, + delegatee: 3, + proxy_type: ProxyType::Any, + delay: 0, + } + .into(), + ); + 
assert_ok!(Proxy::remove_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 2); + System::assert_last_event( + ProxyEvent::ProxyRemoved { + delegator: 1, + delegatee: 2, + proxy_type: ProxyType::Any, + delay: 0, + } + .into(), + ); + assert_ok!(Proxy::remove_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::JustTransfer, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 0); + System::assert_last_event( + ProxyEvent::ProxyRemoved { + delegator: 1, + delegatee: 2, + proxy_type: ProxyType::JustTransfer, + delay: 0, + } + .into(), + ); + assert_noop!( + Proxy::add_proxy(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0), + Error::::NoSelfProxy + ); + }); +} + +#[test] +fn cannot_add_proxy_without_balance() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(5), + 3, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(5), 2); + assert_noop!( + Proxy::add_proxy(RuntimeOrigin::signed(5), 4, ProxyType::Any, 0), + DispatchError::ConsumerRemaining, + ); + }); +} + +#[test] +fn proxying_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::JustTransfer, + 0 + )); + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 0 + )); + + let call = Box::new(call_transfer(6, 1)); + assert_noop!( + Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone()), + Error::::NotProxy + ); + assert_noop!( + Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + Some(ProxyType::Any), + call.clone() + ), + Error::::NotProxy + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + 1, + None, + call.clone() + )); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_eq!(Balances::free_balance(6), 1); + + let call = Box::new(RuntimeCall::System(SystemCall::set_code { code: vec![] })); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + 
None, + call.clone() + )); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_keep_alive { + dest: 6, + value: 1, + })); + assert_ok!( + RuntimeCall::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) + .dispatch(RuntimeOrigin::signed(2)) + ); + System::assert_last_event( + ProxyEvent::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(3), + 1, + None, + call.clone() + )); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_eq!(Balances::free_balance(6), 2); + }); +} + +#[test] +fn pure_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 11); // An extra one for the ED. + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::Any, + 0, + 0 + )); + let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); + System::assert_last_event( + ProxyEvent::PureCreated { + pure: anon, + who: 1, + proxy_type: ProxyType::Any, + disambiguation_index: 0, + } + .into(), + ); + + // other calls to pure allowed as long as they're not exactly the same. 
+ assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::JustTransfer, + 0, + 0 + )); + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::Any, + 0, + 1 + )); + let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None); + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(2), + ProxyType::Any, + 0, + 0 + )); + assert_noop!( + Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), + Error::::Duplicate + ); + System::set_extrinsic_index(1); + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::Any, + 0, + 0 + )); + System::set_extrinsic_index(0); + System::set_block_number(2); + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::Any, + 0, + 0 + )); + + let call = Box::new(call_transfer(6, 1)); + assert_ok!(Balances::transfer_allow_death( + RuntimeOrigin::signed(3), + anon, + 5 + )); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); + assert_eq!(Balances::free_balance(6), 1); + + let call = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_kill_pure( + 1, + ProxyType::Any, + 0, + 1, + 0, + ))); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(2), + anon2, + None, + call.clone() + )); + let de = DispatchError::from(Error::::NoPermission).stripped(); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); + assert_noop!( + Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), + Error::::NoPermission + ); + assert_eq!(Balances::free_balance(1), 1); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(1), + anon, + None, + call.clone() + )); + assert_eq!(Balances::free_balance(1), 3); + assert_noop!( + Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone()), + Error::::NotProxy + ); + }); +} diff --git a/pallets/proxy/src/weights.rs b/pallets/proxy/src/weights.rs new file mode 100644 index 0000000000..3093298e3e 
--- /dev/null +++ b/pallets/proxy/src/weights.rs @@ -0,0 +1,415 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/proxy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_proxy`. 
+pub trait WeightInfo { + fn proxy(p: u32, ) -> Weight; + fn proxy_announced(a: u32, p: u32, ) -> Weight; + fn remove_announcement(a: u32, p: u32, ) -> Weight; + fn reject_announcement(a: u32, p: u32, ) -> Weight; + fn announce(a: u32, p: u32, ) -> Weight; + fn add_proxy(p: u32, ) -> Weight; + fn remove_proxy(p: u32, ) -> Weight; + fn remove_proxies(p: u32, ) -> Weight; + fn create_pure(p: u32, ) -> Weight; + fn kill_pure(p: u32, ) -> Weight; +} + +/// Weights for `pallet_proxy` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 18_280_000 picoseconds. 
+ Weight::from_parts(19_655_145, 4706) + // Standard Error: 2_345 + .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 41_789_000 picoseconds. 
+ Weight::from_parts(41_812_078, 5698) + // Standard Error: 3_694 + .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) + // Standard Error: 3_817 + .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 22_475_000 picoseconds. + Weight::from_parts(22_666_821, 5698) + // Standard Error: 1_797 + .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) + // Standard Error: 1_857 + .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 22_326_000 picoseconds. 
+ Weight::from_parts(22_654_227, 5698) + // Standard Error: 1_859 + .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) + // Standard Error: 1_921 + .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 31_551_000 picoseconds. + Weight::from_parts(32_205_445, 5698) + // Standard Error: 4_089 + .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) + // Standard Error: 4_225 + .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 21_495_000 picoseconds. 
+ Weight::from_parts(22_358_457, 4706) + // Standard Error: 1_606 + .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 21_495_000 picoseconds. + Weight::from_parts(22_579_308, 4706) + // Standard Error: 2_571 + .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 20_541_000 picoseconds. + Weight::from_parts(21_456_750, 4706) + // Standard Error: 1_697 + .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `4706` + // Minimum execution time: 22_809_000 picoseconds. 
+ Weight::from_parts(23_878_644, 4706) + // Standard Error: 1_600 + .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `198 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 20_993_000 picoseconds. + Weight::from_parts(22_067_418, 4706) + // Standard Error: 1_673 + .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 18_280_000 picoseconds. 
+ Weight::from_parts(19_655_145, 4706) + // Standard Error: 2_345 + .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 41_789_000 picoseconds. 
+ Weight::from_parts(41_812_078, 5698) + // Standard Error: 3_694 + .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) + // Standard Error: 3_817 + .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 22_475_000 picoseconds. + Weight::from_parts(22_666_821, 5698) + // Standard Error: 1_797 + .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) + // Standard Error: 1_857 + .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `403 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 22_326_000 picoseconds. 
+ Weight::from_parts(22_654_227, 5698) + // Standard Error: 1_859 + .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) + // Standard Error: 1_921 + .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 31_551_000 picoseconds. + Weight::from_parts(32_205_445, 5698) + // Standard Error: 4_089 + .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) + // Standard Error: 4_225 + .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 21_495_000 picoseconds. 
+ Weight::from_parts(22_358_457, 4706) + // Standard Error: 1_606 + .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 21_495_000 picoseconds. + Weight::from_parts(22_579_308, 4706) + // Standard Error: 2_571 + .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `161 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 20_541_000 picoseconds. + Weight::from_parts(21_456_750, 4706) + // Standard Error: 1_697 + .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `4706` + // Minimum execution time: 22_809_000 picoseconds. 
+ Weight::from_parts(23_878_644, 4706) + // Standard Error: 1_600 + .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `198 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 20_993_000 picoseconds. + Weight::from_parts(22_067_418, 4706) + // Standard Error: 1_673 + .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index f240245c47..0024991ee1 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -34,6 +34,7 @@ serde_bytes = { workspace = true, features = ["alloc"] } serde_with = { workspace = true, features = ["macros"] } sp-runtime = { workspace = true } sp-std = { workspace = true } +libsecp256k1 = { workspace = true } log = { workspace = true } substrate-fixed = { workspace = true } pallet-transaction-payment = { workspace = true } @@ -43,12 +44,15 @@ hex = { workspace = true } share-pool = { default-features = false, path = "../../primitives/share-pool" } safe-math = { default-features = false, path = "../../primitives/safe-math" } approx = { workspace = true } +subtensor-swap-interface = { workspace = true } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../collective" } pallet-drand = { path = "../drand", default-features = false } pallet-membership = { workspace = true } hex-literal = { workspace = true } -num-traits = { version = "0.2.19", default-features = false, 
features = ["libm"] } +num-traits = { version = "0.2.19", default-features = false, features = [ + "libm", +] } tle = { workspace = true, default-features = false } ark-bls12-381 = { workspace = true, default-features = false } ark-serialize = { workspace = true, default-features = false } @@ -59,6 +63,7 @@ rand_chacha = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, features = ["std"] } pallet-scheduler = { workspace = true } +pallet-subtensor-swap = { workspace = true } sp-version = { workspace = true } # Substrate sp-tracing = { workspace = true } @@ -71,56 +76,60 @@ pallet-preimage = { workspace = true } [features] default = ["std"] std = [ + "ark-bls12-381/std", + "ark-serialize/std", "codec/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", - "scale-info/std", + "hex/std", + "libsecp256k1/std", + "log/std", + "ndarray/std", + "num-traits/std", + "pallet-balances/std", "pallet-collective/std", + "pallet-drand/std", "pallet-membership/std", - "substrate-fixed/std", - "pallet-balances/std", "pallet-preimage/std", "pallet-scheduler/std", + "pallet-subtensor-swap/std", "pallet-transaction-payment/std", "pallet-utility/std", + "rand_chacha/std", + "safe-math/std", + "scale-info/std", + "serde/std", + "serde_bytes/std", + "serde_json/std", + "serde_with/std", + "sha2/std", + "share-pool/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", "sp-tracing/std", "sp-version/std", - "hex/std", - "log/std", - "ndarray/std", - "serde/std", - "serde_bytes/std", - "serde_with/std", "substrate-fixed/std", - "num-traits/std", - "serde_json/std", + "substrate-fixed/std", + "subtensor-swap-interface/std", "tle/std", - "pallet-drand/std", - "ark-bls12-381/std", - "ark-serialize/std", "w3f-bls/std", - "rand_chacha/std", - "safe-math/std", - "sha2/std", - "share-pool/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", 
"pallet-balances/runtime-benchmarks", - "pallet-membership/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-drand/runtime-benchmarks", + "pallet-membership/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", - "pallet-drand/runtime-benchmarks" + "pallet-subtensor-swap/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", @@ -133,7 +142,7 @@ try-runtime = [ "pallet-utility/try-runtime", "sp-runtime/try-runtime", "pallet-collective/try-runtime", - "pallet-drand/try-runtime" + "pallet-drand/try-runtime", ] pow-faucet = [] fast-blocks = [] diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index 776ab15b45..b3b60206dd 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -64,6 +64,13 @@ pub trait SubtensorCustomApi { fn get_subnet_state(&self, netuid: u16, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getLockCost")] fn get_network_lock_cost(&self, at: Option) -> RpcResult; + #[method(name = "subnetInfo_getSelectiveMetagraph")] + fn get_selective_metagraph( + &self, + netuid: u16, + metagraph_index: Vec, + at: Option, + ) -> RpcResult>; } pub struct SubtensorCustom { @@ -390,4 +397,23 @@ where Error::RuntimeError(format!("Unable to get subnet lock cost: {:?}", e)).into() }) } + + fn get_selective_metagraph( + &self, + netuid: u16, + metagraph_index: Vec, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_selective_metagraph(at, netuid, metagraph_index) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!( + "Unable to get selective metagraph: {:?}", + e + )) + .into()), + } + } } diff --git a/pallets/subtensor/runtime-api/src/lib.rs 
b/pallets/subtensor/runtime-api/src/lib.rs index c6665bcd97..1a2f34aa9e 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -5,7 +5,7 @@ use codec::Compact; use pallet_subtensor::rpc_info::{ delegate_info::DelegateInfo, dynamic_info::DynamicInfo, - metagraph::Metagraph, + metagraph::{Metagraph, SelectiveMetagraph}, neuron_info::{NeuronInfo, NeuronInfoLite}, show_subnet::SubnetState, stake_info::StakeInfo, @@ -40,12 +40,14 @@ sp_api::decl_runtime_apis! { fn get_metagraph(netuid: u16) -> Option>; fn get_dynamic_info(netuid: u16) -> Option>; fn get_subnet_state(netuid: u16) -> Option>; + fn get_selective_metagraph(netuid: u16, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { fn get_stake_info_for_coldkey( coldkey_account: AccountId32 ) -> Vec>; fn get_stake_info_for_coldkeys( coldkey_accounts: Vec ) -> Vec<(AccountId32, Vec>)>; fn get_stake_info_for_hotkey_coldkey_netuid( hotkey_account: AccountId32, coldkey_account: AccountId32, netuid: u16 ) -> Option>; + fn get_stake_fee( origin: Option<(AccountId32, u16)>, origin_coldkey_account: AccountId32, destination: Option<(AccountId32, u16)>, destination_coldkey_account: AccountId32, amount: u64 ) -> u64; } pub trait SubnetRegistrationRuntimeApi { diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 30d1f39e11..8d4457b0c9 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -6,7 +6,7 @@ use crate::Pallet as Subtensor; use crate::*; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; -use frame_system::RawOrigin; +use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; pub use pallet::*; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Hash}; @@ -594,5 +594,99 @@ batch_reveal_weights { version_keys ) +benchmark_recycle_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = 
::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: recycle_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + +benchmark_burn_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid = 1; + let tempo = 1; + let seed = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + 
TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + + +benchmark_start_call { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + let current_block: u64 = Subtensor::::get_current_block_as_u64(); + let duration = ::DurationOfStartCall::get(); + let block: BlockNumberFor = (current_block + duration).try_into().ok().expect("can't convert to block number"); + frame_system::Pallet::::set_block_number(block); + +}: start_call(RawOrigin::Signed(coldkey), netuid) } diff --git a/pallets/subtensor/src/coinbase/block_emission.rs b/pallets/subtensor/src/coinbase/block_emission.rs index 1a63c16a07..cd9d778b8a 100644 --- a/pallets/subtensor/src/coinbase/block_emission.rs +++ b/pallets/subtensor/src/coinbase/block_emission.rs @@ -1,7 +1,10 @@ use super::*; use frame_support::traits::Get; use safe_math::*; -use substrate_fixed::{transcendental::log2, types::I96F32}; +use 
substrate_fixed::{ + transcendental::log2, + types::{I96F32, U96F32}, +}; impl Pallet { /// Calculates the dynamic TAO emission for a given subnet. @@ -31,15 +34,15 @@ impl Pallet { alpha_block_emission: u64, ) -> (u64, u64, u64) { // Init terms. - let mut tao_in_emission: I96F32 = I96F32::saturating_from_num(tao_emission); - let float_alpha_block_emission: I96F32 = I96F32::saturating_from_num(alpha_block_emission); + let mut tao_in_emission: U96F32 = U96F32::saturating_from_num(tao_emission); + let float_alpha_block_emission: U96F32 = U96F32::saturating_from_num(alpha_block_emission); // Get alpha price for subnet. - let alpha_price: I96F32 = Self::get_alpha_price(netuid); + let alpha_price: U96F32 = Self::get_alpha_price(netuid); log::debug!("{:?} - alpha_price: {:?}", netuid, alpha_price); // Get initial alpha_in - let mut alpha_in_emission: I96F32 = I96F32::saturating_from_num(tao_emission) + let mut alpha_in_emission: U96F32 = U96F32::saturating_from_num(tao_emission) .checked_div(alpha_price) .unwrap_or(float_alpha_block_emission); @@ -60,11 +63,11 @@ impl Pallet { } // Avoid rounding errors. - if tao_in_emission < I96F32::saturating_from_num(1) - || alpha_in_emission < I96F32::saturating_from_num(1) + if tao_in_emission < U96F32::saturating_from_num(1) + || alpha_in_emission < U96F32::saturating_from_num(1) { - alpha_in_emission = I96F32::saturating_from_num(0); - tao_in_emission = I96F32::saturating_from_num(0); + alpha_in_emission = U96F32::saturating_from_num(0); + tao_in_emission = U96F32::saturating_from_num(0); } // Set Alpha in emission. 
diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 669f8e09da..a7e658e89a 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -1,7 +1,7 @@ use super::*; use frame_support::storage::IterableStorageMap; use safe_math::*; -use substrate_fixed::types::{I96F32, I110F18}; +use substrate_fixed::types::{U96F32, U110F18}; impl Pallet { /// Executes the necessary operations for each block. @@ -11,8 +11,8 @@ impl Pallet { // --- 1. Adjust difficulties. Self::adjust_registration_terms_for_networks(); // --- 2. Get the current coinbase emission. - let block_emission: I96F32 = - I96F32::saturating_from_num(Self::get_block_emission().unwrap_or(0)); + let block_emission: U96F32 = + U96F32::saturating_from_num(Self::get_block_emission().unwrap_or(0)); log::debug!("Block emission: {:?}", block_emission); // --- 3. Run emission through network. Self::run_coinbase(block_emission); @@ -191,7 +191,7 @@ impl Pallet { } /// Calculates the upgraded difficulty by multiplying the current difficulty by the ratio ( reg_actual + reg_target / reg_target + reg_target ) - /// We use I110F18 to avoid any overflows on u64. Also min_difficulty and max_difficulty bound the range. + /// We use U110F18 to avoid any overflows on u64. Also min_difficulty and max_difficulty bound the range. 
/// pub fn upgraded_difficulty( netuid: u16, @@ -199,25 +199,25 @@ impl Pallet { registrations_this_interval: u16, target_registrations_per_interval: u16, ) -> u64 { - let updated_difficulty: I110F18 = I110F18::saturating_from_num(current_difficulty) - .saturating_mul(I110F18::saturating_from_num( + let updated_difficulty: U110F18 = U110F18::saturating_from_num(current_difficulty) + .saturating_mul(U110F18::saturating_from_num( registrations_this_interval.saturating_add(target_registrations_per_interval), )) - .safe_div(I110F18::saturating_from_num( + .safe_div(U110F18::saturating_from_num( target_registrations_per_interval.saturating_add(target_registrations_per_interval), )); - let alpha: I110F18 = I110F18::saturating_from_num(Self::get_adjustment_alpha(netuid)) - .safe_div(I110F18::saturating_from_num(u64::MAX)); - let next_value: I110F18 = alpha - .saturating_mul(I110F18::saturating_from_num(current_difficulty)) + let alpha: U110F18 = U110F18::saturating_from_num(Self::get_adjustment_alpha(netuid)) + .safe_div(U110F18::saturating_from_num(u64::MAX)); + let next_value: U110F18 = alpha + .saturating_mul(U110F18::saturating_from_num(current_difficulty)) .saturating_add( - I110F18::saturating_from_num(1.0) + U110F18::saturating_from_num(1.0) .saturating_sub(alpha) .saturating_mul(updated_difficulty), ); - if next_value >= I110F18::saturating_from_num(Self::get_max_difficulty(netuid)) { + if next_value >= U110F18::saturating_from_num(Self::get_max_difficulty(netuid)) { Self::get_max_difficulty(netuid) - } else if next_value <= I110F18::saturating_from_num(Self::get_min_difficulty(netuid)) { + } else if next_value <= U110F18::saturating_from_num(Self::get_min_difficulty(netuid)) { return Self::get_min_difficulty(netuid); } else { return next_value.saturating_to_num::(); @@ -225,7 +225,7 @@ impl Pallet { } /// Calculates the upgraded burn by multiplying the current burn by the ratio ( reg_actual + reg_target / reg_target + reg_target ) - /// We use I110F18 to avoid 
any overflows on u64. Also min_burn and max_burn bound the range. + /// We use U110F18 to avoid any overflows on u64. Also min_burn and max_burn bound the range. /// pub fn upgraded_burn( netuid: u16, @@ -233,25 +233,25 @@ impl Pallet { registrations_this_interval: u16, target_registrations_per_interval: u16, ) -> u64 { - let updated_burn: I110F18 = I110F18::saturating_from_num(current_burn) - .saturating_mul(I110F18::saturating_from_num( + let updated_burn: U110F18 = U110F18::saturating_from_num(current_burn) + .saturating_mul(U110F18::saturating_from_num( registrations_this_interval.saturating_add(target_registrations_per_interval), )) - .safe_div(I110F18::saturating_from_num( + .safe_div(U110F18::saturating_from_num( target_registrations_per_interval.saturating_add(target_registrations_per_interval), )); - let alpha: I110F18 = I110F18::saturating_from_num(Self::get_adjustment_alpha(netuid)) - .safe_div(I110F18::saturating_from_num(u64::MAX)); - let next_value: I110F18 = alpha - .saturating_mul(I110F18::saturating_from_num(current_burn)) + let alpha: U110F18 = U110F18::saturating_from_num(Self::get_adjustment_alpha(netuid)) + .safe_div(U110F18::saturating_from_num(u64::MAX)); + let next_value: U110F18 = alpha + .saturating_mul(U110F18::saturating_from_num(current_burn)) .saturating_add( - I110F18::saturating_from_num(1.0) + U110F18::saturating_from_num(1.0) .saturating_sub(alpha) .saturating_mul(updated_burn), ); - if next_value >= I110F18::saturating_from_num(Self::get_max_burn_as_u64(netuid)) { + if next_value >= U110F18::saturating_from_num(Self::get_max_burn_as_u64(netuid)) { Self::get_max_burn_as_u64(netuid) - } else if next_value <= I110F18::saturating_from_num(Self::get_min_burn_as_u64(netuid)) { + } else if next_value <= U110F18::saturating_from_num(Self::get_min_burn_as_u64(netuid)) { return Self::get_min_burn_as_u64(netuid); } else { return next_value.saturating_to_num::(); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs 
b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7657d9cd53..00b0c2fa55 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -1,7 +1,7 @@ use super::*; use alloc::collections::BTreeMap; use safe_math::*; -use substrate_fixed::types::I96F32; +use substrate_fixed::types::U96F32; use tle::stream_ciphers::AESGCMStreamCipherProvider; use tle::tlock::tld; @@ -21,7 +21,7 @@ pub struct WeightsTlockPayload { // Distribute dividends to each hotkey macro_rules! asfloat { ($val:expr) => { - I96F32::saturating_from_num($val) + U96F32::saturating_from_num($val) }; } @@ -32,21 +32,29 @@ macro_rules! tou64 { } impl Pallet { - pub fn run_coinbase(block_emission: I96F32) { + pub fn run_coinbase(block_emission: U96F32) { // --- 0. Get current block. let current_block: u64 = Self::get_current_block_as_u64(); log::debug!("Current block: {:?}", current_block); - // --- 1. Get all netuids (filter out root.) + // --- 1. Get all netuids (filter out root) let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) .collect(); log::debug!("All subnet netuids: {:?}", subnets); + // Filter out subnets with no first emission block number. + let subnets_to_emit_to: Vec = subnets + .clone() + .into_iter() + .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) + .collect(); + log::debug!("Subnets to emit to: {:?}", subnets_to_emit_to); // --- 2. Get sum of tao reserves ( in a later version we will switch to prices. ) - let mut total_moving_prices: I96F32 = I96F32::saturating_from_num(0.0); - for netuid_i in subnets.iter() { + let mut total_moving_prices: U96F32 = U96F32::saturating_from_num(0.0); + // Only get price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Get and update the moving price of each subnet adding the total together. 
total_moving_prices = total_moving_prices.saturating_add(Self::get_moving_alpha_price(*netuid_i)); @@ -55,30 +63,31 @@ impl Pallet { // --- 3. Get subnet terms (tao_in, alpha_in, and alpha_out) // Computation is described in detail in the dtao whitepaper. - let mut tao_in: BTreeMap = BTreeMap::new(); - let mut alpha_in: BTreeMap = BTreeMap::new(); - let mut alpha_out: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + let mut tao_in: BTreeMap = BTreeMap::new(); + let mut alpha_in: BTreeMap = BTreeMap::new(); + let mut alpha_out: BTreeMap = BTreeMap::new(); + // Only calculate for subnets that we are emitting to. + for netuid_i in subnets_to_emit_to.iter() { // Get subnet price. - let price_i: I96F32 = Self::get_alpha_price(*netuid_i); + let price_i: U96F32 = Self::get_alpha_price(*netuid_i); log::debug!("price_i: {:?}", price_i); // Get subnet TAO. - let moving_price_i: I96F32 = Self::get_moving_alpha_price(*netuid_i); + let moving_price_i: U96F32 = Self::get_moving_alpha_price(*netuid_i); log::debug!("moving_price_i: {:?}", moving_price_i); // Emission is price over total. - let mut tao_in_i: I96F32 = block_emission + let mut tao_in_i: U96F32 = block_emission .saturating_mul(moving_price_i) .checked_div(total_moving_prices) .unwrap_or(asfloat!(0.0)); log::debug!("tao_in_i: {:?}", tao_in_i); // Get alpha_emission total - let alpha_emission_i: I96F32 = asfloat!( + let alpha_emission_i: U96F32 = asfloat!( Self::get_block_emission_for_issuance(Self::get_alpha_issuance(*netuid_i)) .unwrap_or(0) ); log::debug!("alpha_emission_i: {:?}", alpha_emission_i); // Get initial alpha_in - let alpha_in_i: I96F32 = tao_in_i + let alpha_in_i: U96F32 = tao_in_i .checked_div(price_i) .unwrap_or(alpha_emission_i) .min(alpha_emission_i); @@ -103,7 +112,7 @@ impl Pallet { // --- 4. Injection. // Actually perform the injection of alpha_in, alpha_out and tao_in into the subnet pool. // This operation changes the pool liquidity each block. 
- for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Inject Alpha in. let alpha_in_i: u64 = tou64!(*alpha_in.get(netuid_i).unwrap_or(&asfloat!(0))); SubnetAlphaInEmission::::insert(*netuid_i, alpha_in_i); @@ -133,14 +142,14 @@ impl Pallet { // --- 5. Compute owner cuts and remove them from alpha_out remaining. // Remove owner cuts here so that we can properly seperate root dividends in the next step. // Owner cuts are accumulated and then fed to the drain at the end of this func. - let cut_percent: I96F32 = Self::get_float_subnet_owner_cut(); - let mut owner_cuts: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + let cut_percent: U96F32 = Self::get_float_subnet_owner_cut(); + let mut owner_cuts: BTreeMap = BTreeMap::new(); + for netuid_i in subnets_to_emit_to.iter() { // Get alpha out. - let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0)); + let alpha_out_i: U96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); // Calculate the owner cut. - let owner_cut_i: I96F32 = alpha_out_i.saturating_mul(cut_percent); + let owner_cut_i: U96F32 = alpha_out_i.saturating_mul(cut_percent); log::debug!("owner_cut_i: {:?}", owner_cut_i); // Save owner cut. *owner_cuts.entry(*netuid_i).or_insert(asfloat!(0)) = owner_cut_i; @@ -154,32 +163,32 @@ impl Pallet { // --- 6. Seperate out root dividends in alpha and sell them into tao. // Then accumulate those dividends for later. - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Get remaining alpha out. - let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0.0)); + let alpha_out_i: U96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0.0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); // Get total TAO on root. 
- let root_tao: I96F32 = asfloat!(SubnetTAO::::get(0)); + let root_tao: U96F32 = asfloat!(SubnetTAO::::get(0)); log::debug!("root_tao: {:?}", root_tao); // Get total ALPHA on subnet. - let alpha_issuance: I96F32 = asfloat!(Self::get_alpha_issuance(*netuid_i)); + let alpha_issuance: U96F32 = asfloat!(Self::get_alpha_issuance(*netuid_i)); log::debug!("alpha_issuance: {:?}", alpha_issuance); // Get tao_weight - let tao_weight: I96F32 = root_tao.saturating_mul(Self::get_tao_weight()); + let tao_weight: U96F32 = root_tao.saturating_mul(Self::get_tao_weight()); log::debug!("tao_weight: {:?}", tao_weight); // Get root proportional dividends. - let root_proportion: I96F32 = tao_weight + let root_proportion: U96F32 = tao_weight .checked_div(tao_weight.saturating_add(alpha_issuance)) .unwrap_or(asfloat!(0.0)); log::debug!("root_proportion: {:?}", root_proportion); // Get root proportion of alpha_out dividends. - let root_alpha: I96F32 = root_proportion + let root_alpha: U96F32 = root_proportion .saturating_mul(alpha_out_i) // Total alpha emission per block remaining. .saturating_mul(asfloat!(0.5)); // 50% to validators. // Remove root alpha from alpha_out. log::debug!("root_alpha: {:?}", root_alpha); // Get pending alpha as original alpha_out - root_alpha. - let pending_alpha: I96F32 = alpha_out_i.saturating_sub(root_alpha); + let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); log::debug!("pending_alpha: {:?}", pending_alpha); // Sell root emission through the pool. let root_tao: u64 = Self::swap_alpha_for_tao(*netuid_i, tou64!(root_alpha)); @@ -199,12 +208,14 @@ impl Pallet { } // --- 7 Update moving prices after using them in the emission calculation. - for netuid_i in subnets.iter() { + // Only update price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Update moving prices after using them above. Self::update_moving_price(*netuid_i); } // --- 7. Drain pending emission through the subnet based on tempo. 
+ // Run the epoch for *all* subnets, even if we don't emit anything. for &netuid in subnets.iter() { // Pass on subnets that have not reached their tempo. if Self::should_run_epoch(netuid, current_block) { @@ -251,32 +262,13 @@ impl Pallet { } } - pub fn drain_pending_emission( + pub fn calculate_dividends_and_incentives( netuid: u16, - pending_alpha: u64, - pending_tao: u64, - pending_swapped: u64, - owner_cut: u64, - ) { - log::debug!( - "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut - ); - // Setup. - let zero: I96F32 = asfloat!(0.0); - - // Run the epoch. - let hotkey_emission: Vec<(T::AccountId, u64, u64)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); - log::debug!("hotkey_emission: {:?}", hotkey_emission); - + hotkey_emission: Vec<(T::AccountId, u64, u64)>, + ) -> (BTreeMap, BTreeMap) { // Accumulate emission of dividends and incentive per hotkey. let mut incentives: BTreeMap = BTreeMap::new(); - let mut dividends: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); for (hotkey, incentive, dividend) in hotkey_emission { // Accumulate incentives to miners. incentives @@ -285,7 +277,7 @@ impl Pallet { .or_insert(incentive); // Accumulate dividends to parents. let div_tuples: Vec<(T::AccountId, u64)> = - Self::get_dividends_distribution(&hotkey, netuid, dividend); + Self::get_parent_child_dividends_distribution(&hotkey, netuid, dividend); // Accumulate dividends per hotkey. 
for (parent, parent_div) in div_tuples { dividends @@ -297,54 +289,80 @@ impl Pallet { log::debug!("incentives: {:?}", incentives); log::debug!("dividends: {:?}", dividends); + (incentives, dividends) + } + + pub fn calculate_dividend_distribution( + pending_alpha: u64, + pending_tao: u64, + tao_weight: U96F32, + stake_map: BTreeMap, + dividends: BTreeMap, + ) -> ( + BTreeMap, + BTreeMap, + ) { + log::debug!("dividends: {:?}", dividends); + log::debug!("stake_map: {:?}", stake_map); + log::debug!("pending_alpha: {:?}", pending_alpha); + log::debug!("pending_tao: {:?}", pending_tao); + log::debug!("tao_weight: {:?}", tao_weight); + + // Setup. + let zero: U96F32 = asfloat!(0.0); + // Accumulate root divs and alpha_divs. For each hotkey we compute their // local and root dividend proportion based on their alpha_stake/root_stake - let mut total_root_divs: I96F32 = asfloat!(0); - let mut root_dividends: BTreeMap = BTreeMap::new(); - let mut alpha_dividends: BTreeMap = BTreeMap::new(); + let mut total_root_divs: U96F32 = asfloat!(0); + let mut total_alpha_divs: U96F32 = asfloat!(0); + let mut root_dividends: BTreeMap = BTreeMap::new(); + let mut alpha_dividends: BTreeMap = BTreeMap::new(); for (hotkey, dividend) in dividends { - // Get hotkey ALPHA on subnet. - let alpha_stake = asfloat!(Self::get_stake_for_hotkey_on_subnet(&hotkey, netuid)); - // Get hotkey TAO on root. - let root_stake: I96F32 = asfloat!(Self::get_stake_for_hotkey_on_subnet( - &hotkey, - Self::get_root_netuid() - )); - // Convert TAO to alpha with weight. - let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); - // Get total from root and local - let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); - // Copmute root prop. 
- let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); - // Compute root dividends - let root_divs: I96F32 = dividend.saturating_mul(root_prop); - // Compute alpha dividends - let alpha_divs: I96F32 = dividend.saturating_sub(root_divs); - // Record the alpha dividends. - alpha_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(alpha_divs)) - .or_insert(alpha_divs); - // Record the root dividends. - root_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(root_divs)) - .or_insert(root_divs); - // Accumulate total root divs. - total_root_divs = total_root_divs.saturating_add(root_divs); + if let Some((alpha_stake_u64, root_stake_u64)) = stake_map.get(&hotkey) { + // Get hotkey ALPHA on subnet. + let alpha_stake: U96F32 = asfloat!(*alpha_stake_u64); + // Get hotkey TAO on root. + let root_stake: U96F32 = asfloat!(*root_stake_u64); + + // Convert TAO to alpha with weight. + let root_alpha: U96F32 = root_stake.saturating_mul(tao_weight); + // Get total from root and local + let total_alpha: U96F32 = alpha_stake.saturating_add(root_alpha); + // Compute root prop. + let root_prop: U96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); + // Compute root dividends + let root_divs: U96F32 = dividend.saturating_mul(root_prop); + // Compute alpha dividends + let alpha_divs: U96F32 = dividend.saturating_sub(root_divs); + // Record the alpha dividends. + alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(alpha_divs)) + .or_insert(alpha_divs); + // Accumulate total alpha divs. + total_alpha_divs = total_alpha_divs.saturating_add(alpha_divs); + // Record the root dividends. + root_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(root_divs)) + .or_insert(root_divs); + // Accumulate total root divs. 
+ total_root_divs = total_root_divs.saturating_add(root_divs); + } } log::debug!("alpha_dividends: {:?}", alpha_dividends); log::debug!("root_dividends: {:?}", root_dividends); log::debug!("total_root_divs: {:?}", total_root_divs); + log::debug!("total_alpha_divs: {:?}", total_alpha_divs); // Compute root divs as TAO. Here we take - let mut tao_dividends: BTreeMap = BTreeMap::new(); + let mut tao_dividends: BTreeMap = BTreeMap::new(); for (hotkey, root_divs) in root_dividends { // Root proportion. - let root_share: I96F32 = root_divs.checked_div(total_root_divs).unwrap_or(zero); + let root_share: U96F32 = root_divs.checked_div(total_root_divs).unwrap_or(zero); log::debug!("hotkey: {:?}, root_share: {:?}", hotkey, root_share); // Root proportion in TAO - let root_tao: I96F32 = asfloat!(pending_tao).saturating_mul(root_share); + let root_tao: U96F32 = asfloat!(pending_tao).saturating_mul(root_share); log::debug!("hotkey: {:?}, root_tao: {:?}", hotkey, root_tao); // Record root dividends as TAO. tao_dividends @@ -354,6 +372,34 @@ impl Pallet { } log::debug!("tao_dividends: {:?}", tao_dividends); + // Compute proportional alpha divs using the pending alpha and total alpha divs from the epoch. + let mut prop_alpha_dividends: BTreeMap = BTreeMap::new(); + for (hotkey, alpha_divs) in alpha_dividends { + // Alpha proportion. + let alpha_share: U96F32 = alpha_divs.checked_div(total_alpha_divs).unwrap_or(zero); + log::debug!("hotkey: {:?}, alpha_share: {:?}", hotkey, alpha_share); + + // Compute the proportional pending_alpha to this hotkey. + let prop_alpha: U96F32 = asfloat!(pending_alpha).saturating_mul(alpha_share); + log::debug!("hotkey: {:?}, prop_alpha: {:?}", hotkey, prop_alpha); + // Record the proportional alpha dividends. 
+ prop_alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = prop_alpha) + .or_insert(prop_alpha); + } + log::debug!("prop_alpha_dividends: {:?}", prop_alpha_dividends); + + (prop_alpha_dividends, tao_dividends) + } + + pub fn distribute_dividends_and_incentives( + netuid: u16, + owner_cut: u64, + incentives: BTreeMap, + alpha_dividends: BTreeMap, + tao_dividends: BTreeMap, + ) { // Distribute the owner cut. if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { @@ -375,8 +421,19 @@ impl Pallet { // Distribute mining incentives. for (hotkey, incentive) in incentives { - // Increase stake for miner. log::debug!("incentives: hotkey: {:?}", incentive); + + if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { + if hotkey == owner_hotkey { + log::debug!( + "incentives: hotkey: {:?} is SN owner hotkey, skipping {:?}", + hotkey, + incentive + ); + continue; // Skip/burn miner-emission for SN owner hotkey. + } + } + // Increase stake for miner. Self::increase_stake_for_hotkey_and_coldkey_on_subnet( &hotkey.clone(), &Owner::::get(hotkey.clone()), @@ -389,7 +446,7 @@ impl Pallet { let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); for (hotkey, mut alpha_divs) in alpha_dividends { // Get take prop - let alpha_take: I96F32 = + let alpha_take: U96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(alpha_divs); // Remove take prop from alpha_divs alpha_divs = alpha_divs.saturating_sub(alpha_take); @@ -397,24 +454,28 @@ impl Pallet { log::debug!("hotkey: {:?} alpha_take: {:?}", hotkey, alpha_take); Self::increase_stake_for_hotkey_and_coldkey_on_subnet( &hotkey, - &Owner::::get(hotkey.clone()), + &Owner::::get(&hotkey), netuid, tou64!(alpha_take), ); // Give all other nominators. 
log::debug!("hotkey: {:?} alpha_divs: {:?}", hotkey, alpha_divs); - Self::increase_stake_for_hotkey_on_subnet(&hotkey.clone(), netuid, tou64!(alpha_divs)); + Self::increase_stake_for_hotkey_on_subnet(&hotkey, netuid, tou64!(alpha_divs)); // Record dividends for this hotkey. - AlphaDividendsPerSubnet::::mutate(netuid, hotkey.clone(), |divs| { + AlphaDividendsPerSubnet::::mutate(netuid, &hotkey, |divs| { *divs = divs.saturating_add(tou64!(alpha_divs)); }); + // Record total hotkey alpha based on which this value of AlphaDividendsPerSubnet + // was calculated + let total_hotkey_alpha = TotalHotkeyAlpha::::get(&hotkey, netuid); + TotalHotkeyAlphaLastEpoch::::insert(hotkey, netuid, total_hotkey_alpha); } // Distribute root tao divs. let _ = TaoDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); for (hotkey, mut root_tao) in tao_dividends { // Get take prop - let tao_take: I96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(root_tao); + let tao_take: U96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(root_tao); // Remove take prop from root_tao root_tao = root_tao.saturating_sub(tao_take); // Give the validator their take. @@ -439,36 +500,144 @@ impl Pallet { } } + pub fn get_stake_map( + netuid: u16, + hotkeys: Vec<&T::AccountId>, + ) -> BTreeMap { + let mut stake_map: BTreeMap = BTreeMap::new(); + for hotkey in hotkeys { + // Get hotkey ALPHA on subnet. + let alpha_stake: u64 = Self::get_stake_for_hotkey_on_subnet(hotkey, netuid); + // Get hotkey TAO on root. 
+ let root_stake: u64 = + Self::get_stake_for_hotkey_on_subnet(hotkey, Self::get_root_netuid()); + stake_map.insert(hotkey.clone(), (alpha_stake, root_stake)); + } + stake_map + } + + pub fn calculate_dividend_and_incentive_distribution( + netuid: u16, + pending_tao: u64, + pending_validator_alpha: u64, + hotkey_emission: Vec<(T::AccountId, u64, u64)>, + tao_weight: U96F32, + ) -> ( + BTreeMap, + ( + BTreeMap, + BTreeMap, + ), + ) { + let (incentives, dividends) = + Self::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let stake_map: BTreeMap = + Self::get_stake_map(netuid, dividends.keys().collect::>()); + + let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + (incentives, (alpha_dividends, tao_dividends)) + } + + pub fn drain_pending_emission( + netuid: u16, + pending_alpha: u64, + pending_tao: u64, + pending_swapped: u64, + owner_cut: u64, + ) { + log::debug!( + "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut + ); + + let tao_weight = Self::get_tao_weight(); + + // Run the epoch. + let hotkey_emission: Vec<(T::AccountId, u64, u64)> = + Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + log::debug!("hotkey_emission: {:?}", hotkey_emission); + + // Compute the pending validator alpha. + // This is the total alpha being injected, + // minus the alpha for the miners, (50%) + // and minus the alpha swapped for TAO (pending_swapped). + // Important! If the incentives are 0, then Validators get 100% of the alpha.
+ let incentive_sum = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum::(); + log::debug!("incentive_sum: {:?}", incentive_sum); + + let pending_validator_alpha: u64 = if incentive_sum != 0 { + pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped) + } else { + // If the incentive is 0, then Validators get 100% of the alpha. + pending_alpha + }; + + let (incentives, (alpha_dividends, tao_dividends)) = + Self::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + Self::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + alpha_dividends, + tao_dividends, + ); + } + /// Returns the self contribution of a hotkey on a subnet. /// This is the portion of the hotkey's stake that is provided by itself, and not delegated to other hotkeys. pub fn get_self_contribution(hotkey: &T::AccountId, netuid: u16) -> u64 { // Get all childkeys for this hotkey. 
let childkeys = Self::get_children(hotkey, netuid); - let mut remaining_proportion: I96F32 = I96F32::saturating_from_num(1.0); + let mut remaining_proportion: U96F32 = U96F32::saturating_from_num(1.0); for (proportion, _) in childkeys { remaining_proportion = remaining_proportion.saturating_sub( - I96F32::saturating_from_num(proportion) // Normalize - .safe_div(I96F32::saturating_from_num(u64::MAX)), + U96F32::saturating_from_num(proportion) // Normalize + .safe_div(U96F32::saturating_from_num(u64::MAX)), ); } // Get TAO weight - let tao_weight: I96F32 = Self::get_tao_weight(); + let tao_weight: U96F32 = Self::get_tao_weight(); // Get the hotkey's stake including weight - let root_stake: I96F32 = I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet( + let root_stake: U96F32 = U96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet( hotkey, Self::get_root_netuid(), )); - let alpha_stake: I96F32 = - I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); + let alpha_stake: U96F32 = + U96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); // Calculate the - let alpha_contribution: I96F32 = alpha_stake.saturating_mul(remaining_proportion); - let root_contribution: I96F32 = root_stake + let alpha_contribution: U96F32 = alpha_stake.saturating_mul(remaining_proportion); + let root_contribution: U96F32 = root_stake .saturating_mul(remaining_proportion) .saturating_mul(tao_weight); - let combined_contribution: I96F32 = alpha_contribution.saturating_add(root_contribution); + let combined_contribution: U96F32 = alpha_contribution.saturating_add(root_contribution); // Return the combined contribution as a u64 combined_contribution.saturating_to_num::() @@ -487,7 +656,7 @@ impl Pallet { /// # Returns /// * dividend_tuples: `Vec<(T::AccountId, u64)>` - Vector of (hotkey, divs) for each parent including self. 
/// - pub fn get_dividends_distribution( + pub fn get_parent_child_dividends_distribution( hotkey: &T::AccountId, netuid: u16, dividends: u64, @@ -496,11 +665,11 @@ impl Pallet { let mut dividend_tuples: Vec<(T::AccountId, u64)> = vec![]; // Calculate the hotkey's share of the validator emission based on its childkey take - let validating_emission: I96F32 = I96F32::saturating_from_num(dividends); - let mut remaining_emission: I96F32 = validating_emission; - let childkey_take_proportion: I96F32 = - I96F32::saturating_from_num(Self::get_childkey_take(hotkey, netuid)) - .safe_div(I96F32::saturating_from_num(u16::MAX)); + let validating_emission: U96F32 = U96F32::saturating_from_num(dividends); + let mut remaining_emission: U96F32 = validating_emission; + let childkey_take_proportion: U96F32 = + U96F32::saturating_from_num(Self::get_childkey_take(hotkey, netuid)) + .safe_div(U96F32::saturating_from_num(u16::MAX)); log::debug!( "Childkey take proportion: {:?} for hotkey {:?}", childkey_take_proportion, @@ -513,14 +682,14 @@ impl Pallet { // Initialize variables to track emission distribution let mut to_parents: u64 = 0; - let mut total_child_emission_take: I96F32 = I96F32::saturating_from_num(0); + let mut total_child_emission_take: U96F32 = U96F32::saturating_from_num(0); // Initialize variables to calculate total stakes from parents - let mut total_contribution: I96F32 = I96F32::saturating_from_num(0); - let mut parent_contributions: Vec<(T::AccountId, I96F32)> = Vec::new(); + let mut total_contribution: U96F32 = U96F32::saturating_from_num(0); + let mut parent_contributions: Vec<(T::AccountId, U96F32)> = Vec::new(); // Get the weights for root and alpha stakes in emission distribution - let tao_weight: I96F32 = Self::get_tao_weight(); + let tao_weight: U96F32 = Self::get_tao_weight(); // Get self contribution, removing any childkey proportions. 
let self_contribution = Self::get_self_contribution(hotkey, netuid); @@ -532,27 +701,27 @@ impl Pallet { ); // Add self contribution to total contribution but not to the parent contributions. total_contribution = - total_contribution.saturating_add(I96F32::saturating_from_num(self_contribution)); + total_contribution.saturating_add(U96F32::saturating_from_num(self_contribution)); // Calculate total root and alpha (subnet-specific) stakes from all parents for (proportion, parent) in Self::get_parents(hotkey, netuid) { // Convert the parent's stake proportion to a fractional value - let parent_proportion: I96F32 = I96F32::saturating_from_num(proportion) - .safe_div(I96F32::saturating_from_num(u64::MAX)); + let parent_proportion: U96F32 = U96F32::saturating_from_num(proportion) + .safe_div(U96F32::saturating_from_num(u64::MAX)); // Get the parent's root and subnet-specific (alpha) stakes - let parent_root: I96F32 = I96F32::saturating_from_num( + let parent_root: U96F32 = U96F32::saturating_from_num( Self::get_stake_for_hotkey_on_subnet(&parent, Self::get_root_netuid()), ); - let parent_alpha: I96F32 = - I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(&parent, netuid)); + let parent_alpha: U96F32 = + U96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(&parent, netuid)); // Calculate the parent's contribution to the hotkey's stakes - let parent_alpha_contribution: I96F32 = parent_alpha.saturating_mul(parent_proportion); - let parent_root_contribution: I96F32 = parent_root + let parent_alpha_contribution: U96F32 = parent_alpha.saturating_mul(parent_proportion); + let parent_root_contribution: U96F32 = parent_root .saturating_mul(parent_proportion) .saturating_mul(tao_weight); - let combined_contribution: I96F32 = + let combined_contribution: U96F32 = parent_alpha_contribution.saturating_add(parent_root_contribution); // Add to the total stakes @@ -573,22 +742,22 @@ impl Pallet { let parent_owner = 
Self::get_owning_coldkey_for_hotkey(&parent); // Get the stake contribution of this parent key of the total stake. - let emission_factor: I96F32 = contribution + let emission_factor: U96F32 = contribution .checked_div(total_contribution) - .unwrap_or(I96F32::saturating_from_num(0)); + .unwrap_or(U96F32::saturating_from_num(0)); // Get the parent's portion of the validating emission based on their contribution. - let mut parent_emission: I96F32 = validating_emission.saturating_mul(emission_factor); + let mut parent_emission: U96F32 = validating_emission.saturating_mul(emission_factor); // Remove this emission from the remaining emission. remaining_emission = remaining_emission.saturating_sub(parent_emission); // Get the childkey take for this parent. - let child_emission_take: I96F32 = if parent_owner == childkey_owner { + let child_emission_take: U96F32 = if parent_owner == childkey_owner { // The parent is from the same coldkey, so we don't remove any childkey take. - I96F32::saturating_from_num(0) + U96F32::saturating_from_num(0) } else { childkey_take_proportion - .saturating_mul(I96F32::saturating_from_num(parent_emission)) + .saturating_mul(U96F32::saturating_from_num(parent_emission)) }; // Remove the childkey take from the parent's emission. @@ -796,6 +965,8 @@ impl Pallet { e ); continue; + } else { + Self::deposit_event(Event::CRV3WeightsRevealed(netuid, who)); }; } diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 9818b06a48..b4f23ced83 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -549,6 +549,24 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { }); } +// Apply column mask to matrix, mask=true will mask out, i.e. set to 0. +// Assumes each column has the same length. 
+#[allow(dead_code)] +pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { + let Some(first_row) = matrix.first() else { + return; + }; + assert_eq!(mask.len(), first_row.len()); + let zero: I32F32 = I32F32::saturating_from_num(0); + matrix.iter_mut().for_each(|row_elem| { + row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { + if *mask_col { + *elem = zero; + } + }); + }); +} + // Mask out the diagonal of the input matrix in-place. #[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut [Vec]) { @@ -569,6 +587,53 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { }); } +// Remove cells from sparse matrix where the mask function of a scalar and a vector is true. +#[allow(dead_code, clippy::indexing_slicing)] +pub fn scalar_vec_mask_sparse_matrix( + sparse_matrix: &[Vec<(u16, I32F32)>], + scalar: u64, + vector: &[u64], + mask_fn: &dyn Fn(u64, u64) -> bool, +) -> Vec> { + let n: usize = sparse_matrix.len(); + let mut result: Vec> = vec![vec![]; n]; + for (i, sparse_row) in sparse_matrix.iter().enumerate() { + for (j, value) in sparse_row { + if !mask_fn(scalar, vector[*j as usize]) { + result[i].push((*j, *value)); + } + } + } + result +} + +// Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index. +#[allow(dead_code)] +pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: u16) { + let Some(first_row) = matrix.first() else { + return; + }; + if first_row.is_empty() { + return; + } + assert_eq!(matrix.len(), first_row.len()); + + let diag_at_index = matrix + .get(except_index as usize) + .and_then(|row| row.get(except_index as usize)) + .cloned(); + + inplace_mask_diag(matrix); + + matrix.get_mut(except_index as usize).map(|row| { + row.get_mut(except_index as usize).map(|value| { + if let Some(diag_at_index) = diag_at_index { + *value = diag_at_index; + } + }) + }); +} + // Return a new sparse matrix that replaces masked rows with an empty vector placeholder. 
#[allow(dead_code)] pub fn mask_rows_sparse( @@ -604,6 +669,29 @@ pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec], + except_index: u16, +) -> Vec> { + sparse_matrix + .iter() + .enumerate() + .map(|(i, sparse_row)| { + sparse_row + .iter() + .filter(|(j, _)| { + // Is not a diagonal OR is the diagonal at except_index + i != (*j as usize) || (i == except_index as usize && *j == except_index) + }) + .copied() + .collect() + }) + .collect() +} + // Remove cells from sparse matrix where the mask function of two vectors is true. #[allow(dead_code, clippy::indexing_slicing)] pub fn vec_mask_sparse_matrix( diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index a53c8d562f..62027f9636 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -22,6 +22,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block:\n{:?}\n", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo: {:?}", tempo); + // Get activity cutoff. let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff); @@ -44,7 +48,7 @@ impl Pallet { let block_at_registration: Vec = Self::get_block_at_registration(netuid); log::trace!("Block at registration:\n{:?}\n", &block_at_registration); - // Outdated matrix, updated_ij=True if i has last updated (weights) after j has last registered. + // Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered. let outdated: Vec> = last_update .iter() .map(|updated| { @@ -56,6 +60,16 @@ impl Pallet { .collect(); log::trace!("Outdated:\n{:?}\n", &outdated); + // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. 
+ // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + let recently_registered: Vec = block_at_registration + .iter() + .map(|registered| last_tempo <= *registered) + .collect(); + log::trace!("Recently registered:\n{:?}\n", &recently_registered); + // =========== // == Stake == // =========== @@ -111,6 +125,9 @@ impl Pallet { // == Weights == // ============= + // Get owner uid. + let owner_uid: Option = Self::get_owner_uid(netuid); + // Access network weights row unnormalized. let mut weights: Vec> = Self::get_weights(netuid); log::trace!("W:\n{:?}\n", &weights); @@ -119,7 +136,13 @@ impl Pallet { inplace_mask_rows(&validator_forbids, &mut weights); log::trace!("W (permit): {:?}", &weights); - // Remove self-weight by masking diagonal. + // Remove self-weight by masking diagonal; keep owner_uid self-weight. + if let Some(owner_uid) = owner_uid { + inplace_mask_diag_except_index(&mut weights, owner_uid); + } else { + inplace_mask_diag(&mut weights); + } + inplace_mask_diag(&mut weights); log::trace!("W (permit+diag):\n{:?}\n", &weights); @@ -176,7 +199,8 @@ impl Pallet { // Access network bonds. let mut bonds: Vec> = Self::get_bonds(netuid); - inplace_mask_matrix(&outdated, &mut bonds); // mask outdated bonds + // Remove bonds referring to neurons that have registered since last tempo. + inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 log::trace!("B:\n{:?}\n", &bonds); @@ -377,6 +401,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block: {:?}", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo:\n{:?}\n", tempo); + // Get activity cutoff. 
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff: {:?}", activity_cutoff); @@ -454,6 +482,8 @@ impl Pallet { // == Weights == // ============= + let owner_uid: Option = Self::get_owner_uid(netuid); + // Access network weights row unnormalized. let mut weights: Vec> = Self::get_weights_sparse(netuid); log::trace!("Weights: {:?}", &weights); @@ -462,8 +492,12 @@ impl Pallet { weights = mask_rows_sparse(&validator_forbids, &weights); log::trace!("Weights (permit): {:?}", &weights); - // Remove self-weight by masking diagonal. - weights = mask_diag_sparse(&weights); + // Remove self-weight by masking diagonal; keep owner_uid self-weight. + if let Some(owner_uid) = owner_uid { + weights = mask_diag_sparse_except_index(&weights, owner_uid); + } else { + weights = mask_diag_sparse(&weights); + } log::trace!("Weights (permit+diag): {:?}", &weights); // Remove weights referring to deregistered neurons. @@ -533,12 +567,15 @@ impl Pallet { let mut bonds: Vec> = Self::get_bonds_sparse(netuid); log::trace!("B: {:?}", &bonds); - // Remove bonds referring to deregistered neurons. - bonds = vec_mask_sparse_matrix( + // Remove bonds referring to neurons that have registered since last tempo. 
+ // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( &bonds, - &last_update, + last_tempo, &block_at_registration, - &|updated, registered| updated <= registered, + &|last_tempo, registered| last_tempo <= registered, ); log::trace!("B (outdatedmask): {:?}", &bonds); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 9a4ecdb8ff..e360c307e1 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -77,7 +77,7 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; use pallet_drand::types::RoundNumber; - use sp_core::{ConstU32, H256}; + use sp_core::{ConstU32, H160, H256}; use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; use sp_std::collections::vec_deque::VecDeque; use sp_std::vec; @@ -396,6 +396,11 @@ pub mod pallet { 0 } #[pallet::type_value] + /// Default EMA price halving blocks + pub fn DefaultEMAPriceMovingBlocks() -> u64 { + T::InitialEmaPriceHalvingPeriod::get() + } + #[pallet::type_value] /// Default registrations this block. pub fn DefaultBurn() -> u64 { T::InitialBurn::get() @@ -509,6 +514,12 @@ pub mod pallet { T::InitialNetworkRateLimit::get() } #[pallet::type_value] + /// Default value for weights version key rate limit. + /// In units of tempos. + pub fn DefaultWeightsVersionKeyRateLimit() -> u64 { + 5 // 5 tempos + } + #[pallet::type_value] /// Default value for pending emission. pub fn DefaultPendingEmission() -> u64 { 0 @@ -728,6 +739,10 @@ pub mod pallet { #[pallet::type_value] /// Default value for applying pending items (e.g. childkeys). 
pub fn DefaultPendingCooldown() -> u64 { + if cfg!(feature = "fast-blocks") { + return 15; + } + 7_200 } @@ -779,6 +794,16 @@ pub mod pallet { I96F32::saturating_from_num(10_000_000) } + #[pallet::type_value] + /// Default value for minimum activity cutoff + pub fn DefaultMinActivityCutoff() -> u16 { + 360 + } + + #[pallet::storage] + pub type MinActivityCutoff = + StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; + #[pallet::storage] pub type ColdkeySwapScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; @@ -999,6 +1024,17 @@ pub mod pallet { ValueQuery, DefaultZeroU64, >; + #[pallet::storage] // --- DMAP ( hot, netuid ) --> alpha | Returns the total amount of alpha a hotkey owned in the last epoch. + pub type TotalHotkeyAlphaLastEpoch = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + u16, + u64, + ValueQuery, + DefaultZeroU64, + >; #[pallet::storage] /// DMAP ( hot, netuid ) --> total_alpha_shares | Returns the number of alpha shares for a hotkey on a subnet. pub type TotalHotkeyShares = StorageDoubleMap< @@ -1073,6 +1109,10 @@ pub mod pallet { pub type NetworkRateLimit = StorageValue<_, u64, ValueQuery, DefaultNetworkRateLimit>; #[pallet::storage] // --- ITEM( nominator_min_required_stake ) pub type NominatorMinRequiredStake = StorageValue<_, u64, ValueQuery, DefaultZeroU64>; + #[pallet::storage] + /// ITEM( weights_version_key_rate_limit ) --- Rate limit in tempos. 
+ pub type WeightsVersionKeyRateLimit = + StorageValue<_, u64, ValueQuery, DefaultWeightsVersionKeyRateLimit>; /// ============================ /// ==== Subnet Locks ===== @@ -1096,7 +1136,11 @@ pub mod pallet { /// ============================ /// ==== Subnet Parameters ===== /// ============================ - #[pallet::storage] // --- MAP ( netuid ) --> subnet mechanism + /// --- MAP ( netuid ) --> block number of first emission + #[pallet::storage] + pub type FirstEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; + /// --- MAP ( netuid ) --> subnet mechanism + #[pallet::storage] pub type SubnetMechanism = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultZeroU16>; #[pallet::storage] @@ -1280,6 +1324,10 @@ pub mod pallet { pub type RegistrationsThisBlock = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRegistrationsThisBlock>; #[pallet::storage] + /// --- MAP ( netuid ) --> Halving time of average moving price. + pub type EMAPriceHalvingBlocks = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultEMAPriceMovingBlocks>; + #[pallet::storage] /// --- MAP ( netuid ) --> global_RAO_recycled_for_registration pub type RAORecycledForRegistration = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultRAORecycledForRegistration>; @@ -1513,6 +1561,14 @@ pub mod pallet { OptionQuery, >; + /// ============================= + /// ==== EVM related storage ==== + /// ============================= + #[pallet::storage] + /// --- DMAP (netuid, uid) --> (H160, last_block_where_ownership_was_proven) + pub type AssociatedEvmAddress = + StorageDoubleMap<_, Twox64Concat, u16, Twox64Concat, u16, (H160, u64), OptionQuery>; + /// ================== /// ==== Genesis ===== /// ================== @@ -1552,15 +1608,21 @@ pub mod pallet { } /// Returns the transaction priority for stake operations. 
- pub fn get_priority_staking(coldkey: &T::AccountId, hotkey: &T::AccountId) -> u64 { + pub fn get_priority_staking( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + stake_amount: u64, + ) -> u64 { match LastColdkeyHotkeyStakeBlock::::get(coldkey, hotkey) { Some(last_stake_block) => { let current_block_number = Self::get_current_block_as_u64(); let default_priority = current_block_number.saturating_sub(last_stake_block); - default_priority.saturating_add(u32::MAX as u64) + default_priority + .saturating_add(u32::MAX as u64) + .saturating_add(stake_amount) } - None => 0, + None => stake_amount, } } @@ -1689,8 +1751,12 @@ where Pallet::::get_priority_set_weights(who, netuid) } - pub fn get_priority_staking(coldkey: &T::AccountId, hotkey: &T::AccountId) -> u64 { - Pallet::::get_priority_staking(coldkey, hotkey) + pub fn get_priority_staking( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + stake_amount: u64, + ) -> u64 { + Pallet::::get_priority_staking(coldkey, hotkey, stake_amount) } pub fn check_weights_min_stake(who: &T::AccountId, netuid: u16) -> bool { @@ -1905,7 +1971,7 @@ where *amount_staked, false, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *amount_staked), ) } Some(Call::add_stake_limit { @@ -1935,7 +2001,7 @@ where max_amount, *allow_partial, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *amount_staked), ) } Some(Call::remove_stake { @@ -1953,7 +2019,7 @@ where *amount_unstaked, false, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *amount_unstaked), ) } Some(Call::remove_stake_limit { @@ -1976,7 +2042,7 @@ where max_amount, *allow_partial, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *amount_unstaked), ) } Some(Call::move_stake { @@ -2007,7 +2073,7 @@ where None, false, ), - Self::get_priority_staking(who, origin_hotkey), + Self::get_priority_staking(who, origin_hotkey, 
*alpha_amount), ) } Some(Call::transfer_stake { @@ -2038,7 +2104,7 @@ where None, true, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *alpha_amount), ) } Some(Call::swap_stake { @@ -2068,7 +2134,7 @@ where None, false, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *alpha_amount), ) } Some(Call::swap_stake_limit { @@ -2107,7 +2173,7 @@ where Some(*allow_partial), false, ), - Self::get_priority_staking(who, hotkey), + Self::get_priority_staking(who, hotkey, *alpha_amount), ) } Some(Call::register { netuid, .. } | Call::burned_register { netuid, .. }) => { @@ -2203,9 +2269,13 @@ where self, who: &Self::AccountId, call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, + info: &DispatchInfoOf, + len: usize, ) -> Result { + // We need to perform same checks as Self::validate so that + // the validation is performed during Executive::apply_extrinsic as well. + // this prevents inclusion of invalid tx in a block by malicious block author. + self.validate(who, call, info, len)?; match call.is_sub_type() { Some(Call::add_stake { .. }) => { let transaction_fee = 100000; diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 60140a8ab2..23f117522c 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -5,6 +5,9 @@ use frame_support::pallet_macros::pallet_section; /// This can later be imported into the pallet using [`import_section`]. #[pallet_section] mod config { + use frame_support::pallet_prelude::*; + use subtensor_swap_interface::SwapHandler; + /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config + pallet_drand::Config { @@ -47,6 +50,9 @@ mod config { /// the preimage to store the call data. type Preimages: QueryPreimage + StorePreimage; + /// Swap interface. 
+ type SwapInterface: SwapHandler; + /// ================================= /// ==== Initial Value Constants ==== /// ================================= @@ -207,5 +213,11 @@ mod config { /// Initial TAO weight. #[pallet::constant] type InitialTaoWeight: Get; + /// Initial EMA price halving period + #[pallet::constant] + type InitialEmaPriceHalvingPeriod: Get; + /// Block number after a new subnet accept the start call extrinsic. + #[pallet::constant] + type DurationOfStartCall: Get; } } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index bbd52e00d3..4ea03c957b 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -9,6 +9,7 @@ mod dispatches { use frame_support::traits::schedule::DispatchTime; use frame_support::traits::schedule::v3::Anon as ScheduleAnon; use frame_system::pallet_prelude::BlockNumberFor; + use sp_core::ecdsa::Signature; use sp_runtime::traits::Saturating; use crate::MAX_CRV3_COMMIT_SIZE_BYTES; @@ -966,7 +967,7 @@ mod dispatches { ) -> DispatchResultWithPostInfo { // Ensure it's called with root privileges (scheduler has root privileges) ensure_root(origin)?; - log::info!("swap_coldkey: {:?} -> {:?}", old_coldkey, new_coldkey); + log::debug!("swap_coldkey: {:?} -> {:?}", old_coldkey, new_coldkey); Self::do_swap_coldkey(&old_coldkey, &new_coldkey, swap_cost) } @@ -1389,40 +1390,42 @@ mod dispatches { .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] pub fn schedule_dissolve_network( - origin: OriginFor, - netuid: u16, + _origin: OriginFor, + _netuid: u16, ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - - let current_block: BlockNumberFor = >::block_number(); - let duration: BlockNumberFor = DissolveNetworkScheduleDuration::::get(); - let when: BlockNumberFor = current_block.saturating_add(duration); - - let call = Call::::dissolve_network { 
- coldkey: who.clone(), - netuid, - }; - - let bound_call = T::Preimages::bound(LocalCallOf::::from(call.clone())) - .map_err(|_| Error::::FailedToSchedule)?; - - T::Scheduler::schedule( - DispatchTime::At(when), - None, - 63, - frame_system::RawOrigin::Root.into(), - bound_call, - ) - .map_err(|_| Error::::FailedToSchedule)?; - - // Emit the SwapScheduled event - Self::deposit_event(Event::DissolveNetworkScheduled { - account: who.clone(), - netuid, - execution_block: when, - }); - - Ok(().into()) + Err(Error::::CallDisabled.into()) + + // let who = ensure_signed(origin)?; + + // let current_block: BlockNumberFor = >::block_number(); + // let duration: BlockNumberFor = DissolveNetworkScheduleDuration::::get(); + // let when: BlockNumberFor = current_block.saturating_add(duration); + + // let call = Call::::dissolve_network { + // coldkey: who.clone(), + // netuid, + // }; + + // let bound_call = T::Preimages::bound(LocalCallOf::::from(call.clone())) + // .map_err(|_| Error::::FailedToSchedule)?; + + // T::Scheduler::schedule( + // DispatchTime::At(when), + // None, + // 63, + // frame_system::RawOrigin::Root.into(), + // bound_call, + // ) + // .map_err(|_| Error::::FailedToSchedule)?; + + // // Emit the SwapScheduled event + // Self::deposit_event(Event::DissolveNetworkScheduled { + // account: who.clone(), + // netuid, + // execution_block: when, + // }); + + // Ok(().into()) } /// ---- Set prometheus information for the neuron. @@ -1907,5 +1910,118 @@ mod dispatches { Ok(()) } + + /// Initiates a call on a subnet. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be signed by the subnet owner. + /// * `netuid` - The unique identifier of the subnet on which the call is being initiated. + /// + /// # Events + /// Emits a `FirstEmissionBlockNumberSet` event on success. 
+ #[pallet::call_index(92)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(6, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + Self::do_start_call(origin, netuid)?; + Ok(()) + } + + /// Attempts to associate a hotkey with an EVM key. + /// + /// The signature will be checked to see if the recovered public key matches the `evm_key` provided. + /// + /// The EVM key is expected to sign the message according to this formula to produce the signature: + /// ```text + /// keccak_256(hotkey ++ keccak_256(block_number)) + /// ``` + /// + /// # Arguments + /// * `origin` - The origin of the transaction, which must be signed by the coldkey that owns the `hotkey`. + /// * `netuid` - The netuid that the `hotkey` belongs to. + /// * `hotkey` - The hotkey associated with the `origin`. + /// * `evm_key` - The EVM key to associate with the `hotkey`. + /// * `block_number` - The block number used in the `signature`. + /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed `block_number`. + /// + /// # Errors + /// Returns an error if: + /// * The transaction is not signed. + /// * The hotkey is not owned by the origin coldkey. + /// * The hotkey does not belong to the subnet identified by the netuid. + /// * The EVM key cannot be recovered from the signature. + /// * The EVM key recovered from the signature does not match the given EVM key. 
+ /// + /// # Events + /// May emit a `EvmKeyAssociated` event on success + #[pallet::call_index(93)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn associate_evm_key( + origin: T::RuntimeOrigin, + netuid: u16, + hotkey: T::AccountId, + evm_key: H160, + block_number: u64, + signature: Signature, + ) -> DispatchResult { + Self::do_associate_evm_key(origin, netuid, hotkey, evm_key, block_number, signature) + } + + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to recycle + /// * `netuid` - The subnet ID + /// + /// # Events + /// Emits a `TokensRecycled` event on success. + #[pallet::call_index(101)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 2)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn recycle_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_recycle_alpha(origin, hotkey, amount, netuid) + } + + /// Burns alpha from a cold/hot key pair without reducing `AlphaOut` + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to burn + /// * `netuid` - The subnet ID + /// + /// # Events + /// Emits a `TokensBurned` event on success. 
+ #[pallet::call_index(102)] + #[pallet::weight(( + Weight::from_parts(2_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn burn_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_burn_alpha(origin, hotkey, amount, netuid) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 8515b5fe73..052b15d5e6 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -191,5 +191,21 @@ mod errors { SlippageTooHigh, /// Subnet disallows transfer. TransferDisallowed, + /// Activity cutoff is being set too low. + ActivityCutoffTooLow, + /// Call is disabled + CallDisabled, + /// FirstEmissionBlockNumber is already set. + FirstEmissionBlockNumberAlreadySet, + /// need wait for more blocks to accept the start call extrinsic. + NeedWaitingMoreBlocksToStarCall, + /// Not enough AlphaOut on the subnet to recycle + NotEnoughAlphaOutToRecycle, + /// Cannot burn or recycle TAO from root subnet + CannotBurnOrRecycleOnRootSubnet, + /// Public key cannot be recovered. + UnableToRecoverPublicKey, + /// Recovered public key is invalid. + InvalidRecoveredPublicKey, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 834aa901fa..8c2e863d0e 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -14,9 +14,9 @@ mod events { /// a network is removed. NetworkRemoved(u16), /// stake has been transferred from the a coldkey account onto the hotkey staking account. - StakeAdded(T::AccountId, T::AccountId, u64, u64, u16), + StakeAdded(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been removed from the hotkey staking account onto the coldkey account. 
- StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16), + StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been moved from origin (hotkey, subnet ID) to destination (hotkey, subnet ID) of this amount (in TAO). StakeMoved(T::AccountId, T::AccountId, u16, T::AccountId, u16, u64), /// a caller successfully sets their weights on a subnetwork. @@ -275,5 +275,53 @@ mod events { /// Parameters: /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), + /// FirstEmissionBlockNumber is set via start call extrinsic + /// + /// Parameters: + /// netuid + /// block number + FirstEmissionBlockNumberSet(u16, u64), + + /// Alpha has been recycled, reducing AlphaOut on a subnet. + /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id) + AlphaRecycled(T::AccountId, T::AccountId, u64, u16), + + /// Alpha have been burned without reducing AlphaOut. + /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id) + AlphaBurned(T::AccountId, T::AccountId, u64, u16), + + /// An EVM key has been associated with a hotkey. + EvmKeyAssociated { + /// The subnet that the hotkey belongs to. + netuid: u16, + /// The hotkey associated with the EVM key. + hotkey: T::AccountId, + /// The EVM key being associated with the hotkey. + evm_key: H160, + /// The block where the association happened. + block_associated: u64, + }, + + /// CRV3 Weights have been successfully revealed. + /// + /// - **netuid**: The network identifier. + /// - **who**: The account ID of the user revealing the weights. + CRV3WeightsRevealed(u16, T::AccountId), + + /// Commit-Reveal periods has been successfully set. + /// + /// - **netuid**: The network identifier. + /// - **periods**: The number of epochs before the reveal. + CommitRevealPeriodsSet(u16, u64), + + /// Commit-Reveal has been successfully toggled. + /// + /// - **netuid**: The network identifier. + /// - **Enabled**: Is Commit-Reveal enabled. 
+ CommitRevealEnabled(u16, bool), } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index df9dffabca..49fc4ccfe5 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -81,7 +81,13 @@ mod hooks { // Remove Stake map entries .saturating_add(migrations::migrate_remove_stake_map::migrate_remove_stake_map::()) // Remove unused maps entries - .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()); + .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()) + // Set last emission block number for all existed subnets before start call feature applied + .saturating_add(migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::()) + // Remove all zero value entries in TotalHotkeyAlpha + .saturating_add(migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::()) + // Wipe existing items to prevent bad decoding for new type + .saturating_add(migrations::migrate_upgrade_revealed_commitments::migrate_upgrade_revealed_commitments::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs index ba9d85badc..5bccfff9a0 100644 --- a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs +++ b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs @@ -15,10 +15,7 @@ pub mod deprecated_loaded_emission_format { } pub(crate) fn migrate_init_total_issuance() -> Weight { - // Calculate the total locked tokens across all subnets let subnets_len = crate::SubnetLocked::::iter().count() as u64; - let total_subnet_locked: u64 = - crate::SubnetLocked::::iter().fold(0, |acc, (_, v)| acc.saturating_add(v)); // Retrieve the total balance of all accounts let total_account_balances = <::Currency as fungible::Inspect< @@ 
-26,15 +23,25 @@ pub(crate) fn migrate_init_total_issuance() -> Weight { >>::total_issuance(); // Get the total stake from the system - let total_stake = crate::TotalStake::::get(); + let prev_total_stake = crate::TotalStake::::get(); + // Calculate new total stake using the sum of all subnet TAO + let total_subnet_tao: u64 = + crate::SubnetTAO::::iter().fold(0, |acc, (_, v)| acc.saturating_add(v)); + + let total_stake = total_subnet_tao; + // Update the total stake in storage + crate::TotalStake::::put(total_stake); + log::info!( + "Subtensor Pallet Total Stake Updated: previous: {:?}, new: {:?}", + prev_total_stake, + total_stake + ); // Retrieve the previous total issuance for logging purposes let prev_total_issuance = crate::TotalIssuance::::get(); // Calculate the new total issuance - let new_total_issuance = total_account_balances - .saturating_add(total_stake) - .saturating_add(total_subnet_locked); + let new_total_issuance = total_account_balances.saturating_add(total_stake); // Update the total issuance in storage crate::TotalIssuance::::put(new_total_issuance); @@ -48,7 +55,7 @@ pub(crate) fn migrate_init_total_issuance() -> Weight { // Return the weight of the operation - // We performed subnets_len + 5 reads and 1 write + // We performed subnets_len + 5 reads and 2 writes - ::DbWeight::get().reads_writes(subnets_len.saturating_add(5), 1) + ::DbWeight::get().reads_writes(subnets_len.saturating_add(5), 2) } pub mod initialise_total_issuance { diff --git a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs new file mode 100644 index 0000000000..3b45615bf4 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs @@ -0,0 +1,60 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { + let migration_name = 
b"migrate_remove_zero_total_hotkey_alpha".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // ------------------------------ + // Step 0: Check if already run + // ------------------------------ + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Remove any zero entries in TotalHotkeyAlpha + // ------------------------------ + + let mut removed_entries_count = 0u64; + + // For each (hotkey, netuid, alpha) entry, remove if alpha == 0 + for (hotkey, netuid, alpha) in TotalHotkeyAlpha::::iter() { + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + if alpha == 0 { + TotalHotkeyAlpha::::remove(&hotkey, netuid); + removed_entries_count = removed_entries_count.saturating_add(1); + } + } + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count)); + + log::info!( + "Removed {} zero entries from TotalHotkeyAlpha.", + removed_entries_count + ); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs new file mode 100644 index 0000000000..04ad306218 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs @@ -0,0 +1,53 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; + +pub fn 
migrate_set_first_emission_block_number() -> Weight { + let migration_name = b"migrate_set_first_emission_block_number".to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{:?}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Set the first emission block for all subnets except root + // ------------------------------ + let netuids = Pallet::::get_all_subnet_netuids(); + let current_block_number = Pallet::::get_current_block_as_u64(); + for netuid in netuids.iter() { + if *netuid != 0 { + FirstEmissionBlockNumber::::insert(netuid, current_block_number); + } + } + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().reads(2)); + + if netuids.is_empty() { + weight = weight.saturating_add(T::DbWeight::get().writes(1_u64)); + } else { + weight = weight.saturating_add(T::DbWeight::get().writes(netuids.len() as u64)); + } + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_subnet_volume.rs b/pallets/subtensor/src/migrations/migrate_subnet_volume.rs index 46835877d9..cbf2007c3d 100644 --- a/pallets/subtensor/src/migrations/migrate_subnet_volume.rs +++ b/pallets/subtensor/src/migrations/migrate_subnet_volume.rs @@ -6,7 +6,7 @@ pub fn migrate_subnet_volume() -> Weight { let migration_name = b"migrate_subnet_volume".to_vec(); // Initialize the weight with one read operation. 
- let weight = T::DbWeight::get().reads(1); + let mut weight = T::DbWeight::get().reads(1); // Check if the migration has already run if HasMigrationRun::::get(&migration_name) { @@ -29,5 +29,11 @@ pub fn migrate_subnet_volume() -> Weight { }); log::info!("Migrated {} entries in SubnetVolume", migrated); - weight.saturating_add(T::DbWeight::get().reads_writes(migrated, migrated)) + weight = weight.saturating_add(T::DbWeight::get().reads_writes(migrated, migrated)); + + // Mark the migration as completed + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + weight } diff --git a/pallets/subtensor/src/migrations/migrate_upgrade_revealed_commitments.rs b/pallets/subtensor/src/migrations/migrate_upgrade_revealed_commitments.rs new file mode 100644 index 0000000000..ce3bff62ec --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_upgrade_revealed_commitments.rs @@ -0,0 +1,58 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; +use sp_io::{KillStorageResult, hashing::twox_128, storage::clear_prefix}; + +pub fn migrate_upgrade_revealed_commitments() -> Weight { + let migration_name = b"migrate_revealed_commitments_v2".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------------------------------------- + // 1) Clear the old `RevealedCommitments` storage from the `Commitments` pallet + // ------------------------------------------------------------- + let mut revealed_commitments_prefix = Vec::new(); + revealed_commitments_prefix.extend_from_slice(&twox_128("Commitments".as_bytes())); + revealed_commitments_prefix.extend_from_slice(&twox_128("RevealedCommitments".as_bytes())); + + let removal_result = clear_prefix(&revealed_commitments_prefix, Some(u32::MAX)); + let removed_entries_count = match removal_result { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::warn!("Failed to remove some items during `migrate_revealed_commitments`."); + removed as u64 + } + }; + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count)); + + log::info!( + "Removed {} entries from `RevealedCommitments`.", + removed_entries_count + ); + + // ------------------------------------------------------------- + // 2) Mark this migration as completed + // ------------------------------------------------------------- + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 6af6ad2a56..23fb3cde1f 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -11,6 +11,8 @@ pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_remove_stake_map; pub mod migrate_remove_unused_maps_and_values; +pub mod migrate_remove_zero_total_hotkey_alpha; +pub mod 
migrate_set_first_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; pub mod migrate_stake_threshold; @@ -19,3 +21,4 @@ pub mod migrate_to_v1_separate_emission; pub mod migrate_to_v2_fixed_total_stake; pub mod migrate_total_issuance; pub mod migrate_transfer_ownership_to_foundation; +pub mod migrate_upgrade_revealed_commitments; diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 3d5ee7537b..fe6ab0ed90 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -107,6 +107,500 @@ pub struct Metagraph { alpha_dividends_per_hotkey: Vec<(AccountId, Compact)>, // List of dividend payout in alpha via subnet. } +#[freeze_struct("182c7375fee9db7b")] +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] +pub struct SelectiveMetagraph { + // Subnet index + netuid: Compact, + + // Name and symbol + name: Option>>, // name + symbol: Option>>, // token symbol + identity: Option>, // identity information. + network_registered_at: Option>, // block at registration + + // Keys for owner. + owner_hotkey: Option, // hotkey + owner_coldkey: Option, // coldkey. + + // Tempo terms. + block: Option>, // block at call. + tempo: Option>, // epoch tempo + last_step: Option>, // last epoch + blocks_since_last_step: Option>, // blocks since last epoch. 
+ + // Subnet emission terms + subnet_emission: Option>, // subnet emission via stao + alpha_in: Option>, // amount of alpha in reserve + alpha_out: Option>, // amount of alpha outstanding + tao_in: Option>, // amount of tao injected per block + alpha_out_emission: Option>, // amount injected in alpha reserves per block + alpha_in_emission: Option>, // amount injected outstanding per block + tao_in_emission: Option>, // amount of tao injected per block + pending_alpha_emission: Option>, // pending alpha to be distributed + pending_root_emission: Option>, // panding tao for root divs to be distributed + subnet_volume: Option>, // volume of the subnet in TAO + moving_price: Option, // subnet moving price. + + // Hparams for epoch + rho: Option>, // subnet rho param + kappa: Option>, // subnet kappa param + + // Validator params + min_allowed_weights: Option>, // min allowed weights per val + max_weights_limit: Option>, // max allowed weights per val + weights_version: Option>, // allowed weights version + weights_rate_limit: Option>, // rate limit on weights. + activity_cutoff: Option>, // validator weights cut off period in blocks + max_validators: Option>, // max allowed validators. + + // Registration + num_uids: Option>, + max_uids: Option>, + burn: Option>, // current burn cost.. + difficulty: Option>, // current difficulty. + registration_allowed: Option, // allows registrations. + pow_registration_allowed: Option, // pow registration enabled. + immunity_period: Option>, // subnet miner immunity period + min_difficulty: Option>, // min pow difficulty + max_difficulty: Option>, // max pow difficulty + min_burn: Option>, // min tao burn + max_burn: Option>, // max tao burn + adjustment_alpha: Option>, // adjustment speed for registration params. + adjustment_interval: Option>, // pow and burn adjustment interval + target_regs_per_interval: Option>, // target registrations per interval + max_regs_per_block: Option>, // max registrations per block. 
+ serving_rate_limit: Option>, // axon serving rate limit + + // CR + commit_reveal_weights_enabled: Option, // Is CR enabled. + commit_reveal_period: Option>, // Commit reveal interval + + // Bonds + liquid_alpha_enabled: Option, // Bonds liquid enabled. + alpha_high: Option>, // Alpha param high + alpha_low: Option>, // Alpha param low + bonds_moving_avg: Option>, // Bonds moving avg + + // Metagraph info. + hotkeys: Option>, // hotkey per UID + coldkeys: Option>, // coldkey per UID + identities: Option>>, // coldkeys identities + axons: Option>, // UID axons. + active: Option>, // Avtive per UID + validator_permit: Option>, // Val permit per UID + pruning_score: Option>>, // Pruning per UID + last_update: Option>>, // Last update per UID + emission: Option>>, // Emission per UID + dividends: Option>>, // Dividends per UID + incentives: Option>>, // Mining incentives per UID + consensus: Option>>, // Consensus per UID + trust: Option>>, // Trust per UID + rank: Option>>, // Rank per UID + block_at_registration: Option>>, // Reg block per UID + alpha_stake: Option>>, // Alpha staked per UID + tao_stake: Option>>, // TAO staked per UID + total_stake: Option>>, // Total stake per UID + + // Dividend break down. + tao_dividends_per_hotkey: Option)>>, // List of dividend payouts in tao via root. + alpha_dividends_per_hotkey: Option)>>, // List of dividend payout in alpha via subnet. 
+} + +impl SelectiveMetagraph +where + AccountId: TypeInfo + Encode + Decode + Clone, +{ + pub fn merge_value(&mut self, other: &Self, metagraph_index: usize) { + match SelectiveMetagraphIndex::from_index(metagraph_index) { + // Name and symbol + Some(SelectiveMetagraphIndex::Name) => self.name = other.name.clone(), + Some(SelectiveMetagraphIndex::Symbol) => self.symbol = other.symbol.clone(), + Some(SelectiveMetagraphIndex::Identity) => self.identity = other.identity.clone(), + Some(SelectiveMetagraphIndex::NetworkRegisteredAt) => { + self.network_registered_at = other.network_registered_at + } + Some(SelectiveMetagraphIndex::OwnerHotkey) => { + self.owner_hotkey = other.owner_hotkey.clone() + } + Some(SelectiveMetagraphIndex::OwnerColdkey) => { + self.owner_coldkey = other.owner_coldkey.clone() + } + Some(SelectiveMetagraphIndex::Block) => self.block = other.block, + Some(SelectiveMetagraphIndex::Tempo) => self.tempo = other.tempo, + Some(SelectiveMetagraphIndex::LastStep) => self.last_step = other.last_step, + Some(SelectiveMetagraphIndex::BlocksSinceLastStep) => { + self.blocks_since_last_step = other.blocks_since_last_step + } + Some(SelectiveMetagraphIndex::SubnetEmission) => { + self.subnet_emission = other.subnet_emission + } + Some(SelectiveMetagraphIndex::AlphaIn) => self.alpha_in = other.alpha_in, + Some(SelectiveMetagraphIndex::AlphaOut) => self.alpha_out = other.alpha_out, + Some(SelectiveMetagraphIndex::TaoIn) => self.tao_in = other.tao_in, + Some(SelectiveMetagraphIndex::AlphaOutEmission) => { + self.alpha_out_emission = other.alpha_out_emission + } + Some(SelectiveMetagraphIndex::AlphaInEmission) => { + self.alpha_in_emission = other.alpha_in_emission + } + Some(SelectiveMetagraphIndex::TaoInEmission) => { + self.tao_in_emission = other.tao_in_emission + } + Some(SelectiveMetagraphIndex::PendingAlphaEmission) => { + self.pending_alpha_emission = other.pending_alpha_emission + } + Some(SelectiveMetagraphIndex::PendingRootEmission) => { + 
self.pending_root_emission = other.pending_root_emission + } + Some(SelectiveMetagraphIndex::SubnetVolume) => self.subnet_volume = other.subnet_volume, + Some(SelectiveMetagraphIndex::MovingPrice) => self.moving_price = other.moving_price, + Some(SelectiveMetagraphIndex::Rho) => self.rho = other.rho, + Some(SelectiveMetagraphIndex::Kappa) => self.kappa = other.kappa, + Some(SelectiveMetagraphIndex::MinAllowedWeights) => { + self.min_allowed_weights = other.min_allowed_weights + } + Some(SelectiveMetagraphIndex::MaxWeightsLimit) => { + self.max_weights_limit = other.max_weights_limit + } + Some(SelectiveMetagraphIndex::WeightsVersion) => { + self.weights_version = other.weights_version + } + Some(SelectiveMetagraphIndex::WeightsRateLimit) => { + self.weights_rate_limit = other.weights_rate_limit + } + Some(SelectiveMetagraphIndex::ActivityCutoff) => { + self.activity_cutoff = other.activity_cutoff + } + Some(SelectiveMetagraphIndex::MaxValidators) => { + self.max_validators = other.max_validators + } + Some(SelectiveMetagraphIndex::NumUids) => self.num_uids = other.num_uids, + Some(SelectiveMetagraphIndex::MaxUids) => self.max_uids = other.max_uids, + Some(SelectiveMetagraphIndex::Burn) => self.burn = other.burn, + Some(SelectiveMetagraphIndex::Difficulty) => self.difficulty = other.difficulty, + Some(SelectiveMetagraphIndex::RegistrationAllowed) => { + self.registration_allowed = other.registration_allowed + } + Some(SelectiveMetagraphIndex::PowRegistrationAllowed) => { + self.pow_registration_allowed = other.pow_registration_allowed + } + Some(SelectiveMetagraphIndex::ImmunityPeriod) => { + self.immunity_period = other.immunity_period + } + Some(SelectiveMetagraphIndex::MinDifficulty) => { + self.min_difficulty = other.min_difficulty + } + Some(SelectiveMetagraphIndex::MaxDifficulty) => { + self.max_difficulty = other.max_difficulty + } + Some(SelectiveMetagraphIndex::MinBurn) => self.min_burn = other.min_burn, + Some(SelectiveMetagraphIndex::MaxBurn) => 
self.max_burn = other.max_burn, + Some(SelectiveMetagraphIndex::AdjustmentAlpha) => { + self.adjustment_alpha = other.adjustment_alpha + } + Some(SelectiveMetagraphIndex::AdjustmentInterval) => { + self.adjustment_interval = other.adjustment_interval + } + Some(SelectiveMetagraphIndex::TargetRegsPerInterval) => { + self.target_regs_per_interval = other.target_regs_per_interval + } + Some(SelectiveMetagraphIndex::MaxRegsPerBlock) => { + self.max_regs_per_block = other.max_regs_per_block + } + Some(SelectiveMetagraphIndex::ServingRateLimit) => { + self.serving_rate_limit = other.serving_rate_limit + } + Some(SelectiveMetagraphIndex::CommitRevealWeightsEnabled) => { + self.commit_reveal_weights_enabled = other.commit_reveal_weights_enabled + } + Some(SelectiveMetagraphIndex::CommitRevealPeriod) => { + self.commit_reveal_period = other.commit_reveal_period + } + Some(SelectiveMetagraphIndex::LiquidAlphaEnabled) => { + self.liquid_alpha_enabled = other.liquid_alpha_enabled + } + Some(SelectiveMetagraphIndex::AlphaHigh) => self.alpha_high = other.alpha_high, + Some(SelectiveMetagraphIndex::AlphaLow) => self.alpha_low = other.alpha_low, + Some(SelectiveMetagraphIndex::BondsMovingAvg) => { + self.bonds_moving_avg = other.bonds_moving_avg + } + Some(SelectiveMetagraphIndex::Hotkeys) => self.hotkeys = other.hotkeys.clone(), + Some(SelectiveMetagraphIndex::Coldkeys) => self.coldkeys = other.coldkeys.clone(), + Some(SelectiveMetagraphIndex::Identities) => self.identities = other.identities.clone(), + Some(SelectiveMetagraphIndex::Axons) => self.axons = other.axons.clone(), + Some(SelectiveMetagraphIndex::Active) => self.active = other.active.clone(), + Some(SelectiveMetagraphIndex::ValidatorPermit) => { + self.validator_permit = other.validator_permit.clone() + } + Some(SelectiveMetagraphIndex::PruningScore) => { + self.pruning_score = other.pruning_score.clone() + } + Some(SelectiveMetagraphIndex::LastUpdate) => { + self.last_update = other.last_update.clone() + } + 
Some(SelectiveMetagraphIndex::Emission) => self.emission = other.emission.clone(), + Some(SelectiveMetagraphIndex::Dividends) => self.dividends = other.dividends.clone(), + Some(SelectiveMetagraphIndex::Incentives) => self.incentives = other.incentives.clone(), + Some(SelectiveMetagraphIndex::Consensus) => self.consensus = other.consensus.clone(), + Some(SelectiveMetagraphIndex::Trust) => self.trust = other.trust.clone(), + Some(SelectiveMetagraphIndex::Rank) => self.rank = other.rank.clone(), + Some(SelectiveMetagraphIndex::BlockAtRegistration) => { + self.block_at_registration = other.block_at_registration.clone() + } + Some(SelectiveMetagraphIndex::AlphaStake) => { + self.alpha_stake = other.alpha_stake.clone() + } + Some(SelectiveMetagraphIndex::TaoStake) => self.tao_stake = other.tao_stake.clone(), + Some(SelectiveMetagraphIndex::TotalStake) => { + self.total_stake = other.total_stake.clone() + } + Some(SelectiveMetagraphIndex::TaoDividendsPerHotkey) => { + self.tao_dividends_per_hotkey = other.tao_dividends_per_hotkey.clone() + } + Some(SelectiveMetagraphIndex::AlphaDividendsPerHotkey) => { + self.alpha_dividends_per_hotkey = other.alpha_dividends_per_hotkey.clone() + } + + None => {} + }; + } +} + +impl Default for SelectiveMetagraph +where + AccountId: TypeInfo + Encode + Decode + Clone, +{ + fn default() -> Self { + Self { + netuid: 0.into(), + name: None, + symbol: None, + identity: None, + network_registered_at: None, + owner_hotkey: None, + owner_coldkey: None, + block: None, + tempo: None, + last_step: None, + blocks_since_last_step: None, + subnet_emission: None, + alpha_in: None, + alpha_out: None, + tao_in: None, + alpha_out_emission: None, + alpha_in_emission: None, + tao_in_emission: None, + pending_alpha_emission: None, + pending_root_emission: None, + subnet_volume: None, + moving_price: None, + rho: None, + kappa: None, + min_allowed_weights: None, + max_weights_limit: None, + weights_version: None, + weights_rate_limit: None, + 
activity_cutoff: None, + max_validators: None, + num_uids: None, + max_uids: None, + burn: None, + difficulty: None, + registration_allowed: None, + pow_registration_allowed: None, + immunity_period: None, + min_difficulty: None, + max_difficulty: None, + min_burn: None, + max_burn: None, + adjustment_alpha: None, + adjustment_interval: None, + target_regs_per_interval: None, + max_regs_per_block: None, + serving_rate_limit: None, + commit_reveal_weights_enabled: None, + commit_reveal_period: None, + liquid_alpha_enabled: None, + alpha_high: None, + alpha_low: None, + bonds_moving_avg: None, + hotkeys: None, + coldkeys: None, + identities: None, + axons: None, + active: None, + validator_permit: None, + pruning_score: None, + last_update: None, + emission: None, + dividends: None, + incentives: None, + consensus: None, + trust: None, + rank: None, + block_at_registration: None, + alpha_stake: None, + tao_stake: None, + total_stake: None, + tao_dividends_per_hotkey: None, + alpha_dividends_per_hotkey: None, + } + } +} + +pub enum SelectiveMetagraphIndex { + Name, + Symbol, + Identity, + NetworkRegisteredAt, + OwnerHotkey, + OwnerColdkey, + Block, + Tempo, + LastStep, + BlocksSinceLastStep, + SubnetEmission, + AlphaIn, + AlphaOut, + TaoIn, + AlphaOutEmission, + AlphaInEmission, + TaoInEmission, + PendingAlphaEmission, + PendingRootEmission, + SubnetVolume, + MovingPrice, + Rho, + Kappa, + MinAllowedWeights, + MaxWeightsLimit, + WeightsVersion, + WeightsRateLimit, + ActivityCutoff, + MaxValidators, + NumUids, + MaxUids, + Burn, + Difficulty, + RegistrationAllowed, + PowRegistrationAllowed, + ImmunityPeriod, + MinDifficulty, + MaxDifficulty, + MinBurn, + MaxBurn, + AdjustmentAlpha, + AdjustmentInterval, + TargetRegsPerInterval, + MaxRegsPerBlock, + ServingRateLimit, + CommitRevealWeightsEnabled, + CommitRevealPeriod, + LiquidAlphaEnabled, + AlphaHigh, + AlphaLow, + BondsMovingAvg, + Hotkeys, + Coldkeys, + Identities, + Axons, + Active, + ValidatorPermit, + 
PruningScore, + LastUpdate, + Emission, + Dividends, + Incentives, + Consensus, + Trust, + Rank, + BlockAtRegistration, + AlphaStake, + TaoStake, + TotalStake, + TaoDividendsPerHotkey, + AlphaDividendsPerHotkey, +} + +impl SelectiveMetagraphIndex { + fn from_index(index: usize) -> Option { + match index { + 0 => Some(SelectiveMetagraphIndex::Name), + 1 => Some(SelectiveMetagraphIndex::Symbol), + 2 => Some(SelectiveMetagraphIndex::Identity), + 3 => Some(SelectiveMetagraphIndex::NetworkRegisteredAt), + 4 => Some(SelectiveMetagraphIndex::OwnerHotkey), + 5 => Some(SelectiveMetagraphIndex::OwnerColdkey), + 6 => Some(SelectiveMetagraphIndex::Block), + 7 => Some(SelectiveMetagraphIndex::Tempo), + 8 => Some(SelectiveMetagraphIndex::LastStep), + 9 => Some(SelectiveMetagraphIndex::BlocksSinceLastStep), + 10 => Some(SelectiveMetagraphIndex::SubnetEmission), + 11 => Some(SelectiveMetagraphIndex::AlphaIn), + 12 => Some(SelectiveMetagraphIndex::AlphaOut), + 13 => Some(SelectiveMetagraphIndex::TaoIn), + 14 => Some(SelectiveMetagraphIndex::AlphaOutEmission), + 15 => Some(SelectiveMetagraphIndex::AlphaInEmission), + 16 => Some(SelectiveMetagraphIndex::TaoInEmission), + 17 => Some(SelectiveMetagraphIndex::PendingAlphaEmission), + 18 => Some(SelectiveMetagraphIndex::PendingRootEmission), + 19 => Some(SelectiveMetagraphIndex::SubnetVolume), + 20 => Some(SelectiveMetagraphIndex::MovingPrice), + 21 => Some(SelectiveMetagraphIndex::Rho), + 22 => Some(SelectiveMetagraphIndex::Kappa), + 23 => Some(SelectiveMetagraphIndex::MinAllowedWeights), + 24 => Some(SelectiveMetagraphIndex::MaxWeightsLimit), + 25 => Some(SelectiveMetagraphIndex::WeightsVersion), + 26 => Some(SelectiveMetagraphIndex::WeightsRateLimit), + 27 => Some(SelectiveMetagraphIndex::ActivityCutoff), + 28 => Some(SelectiveMetagraphIndex::MaxValidators), + 29 => Some(SelectiveMetagraphIndex::NumUids), + 30 => Some(SelectiveMetagraphIndex::MaxUids), + 31 => Some(SelectiveMetagraphIndex::Burn), + 32 => 
Some(SelectiveMetagraphIndex::Difficulty), + 33 => Some(SelectiveMetagraphIndex::RegistrationAllowed), + 34 => Some(SelectiveMetagraphIndex::PowRegistrationAllowed), + 35 => Some(SelectiveMetagraphIndex::ImmunityPeriod), + 36 => Some(SelectiveMetagraphIndex::MinDifficulty), + 37 => Some(SelectiveMetagraphIndex::MaxDifficulty), + 38 => Some(SelectiveMetagraphIndex::MinBurn), + 39 => Some(SelectiveMetagraphIndex::MaxBurn), + 40 => Some(SelectiveMetagraphIndex::AdjustmentAlpha), + 41 => Some(SelectiveMetagraphIndex::AdjustmentInterval), + 42 => Some(SelectiveMetagraphIndex::TargetRegsPerInterval), + 43 => Some(SelectiveMetagraphIndex::MaxRegsPerBlock), + 44 => Some(SelectiveMetagraphIndex::ServingRateLimit), + 45 => Some(SelectiveMetagraphIndex::CommitRevealWeightsEnabled), + 46 => Some(SelectiveMetagraphIndex::CommitRevealPeriod), + 47 => Some(SelectiveMetagraphIndex::LiquidAlphaEnabled), + 48 => Some(SelectiveMetagraphIndex::AlphaHigh), + 49 => Some(SelectiveMetagraphIndex::AlphaLow), + 50 => Some(SelectiveMetagraphIndex::BondsMovingAvg), + 51 => Some(SelectiveMetagraphIndex::Hotkeys), + 52 => Some(SelectiveMetagraphIndex::Coldkeys), + 53 => Some(SelectiveMetagraphIndex::Identities), + 54 => Some(SelectiveMetagraphIndex::Axons), + 55 => Some(SelectiveMetagraphIndex::Active), + 56 => Some(SelectiveMetagraphIndex::ValidatorPermit), + 57 => Some(SelectiveMetagraphIndex::PruningScore), + 58 => Some(SelectiveMetagraphIndex::LastUpdate), + 59 => Some(SelectiveMetagraphIndex::Emission), + 60 => Some(SelectiveMetagraphIndex::Dividends), + 61 => Some(SelectiveMetagraphIndex::Incentives), + 62 => Some(SelectiveMetagraphIndex::Consensus), + 63 => Some(SelectiveMetagraphIndex::Trust), + 64 => Some(SelectiveMetagraphIndex::Rank), + 65 => Some(SelectiveMetagraphIndex::BlockAtRegistration), + 66 => Some(SelectiveMetagraphIndex::AlphaStake), + 67 => Some(SelectiveMetagraphIndex::TaoStake), + 68 => Some(SelectiveMetagraphIndex::TotalStake), + 69 => 
Some(SelectiveMetagraphIndex::TaoDividendsPerHotkey), + 70 => Some(SelectiveMetagraphIndex::AlphaDividendsPerHotkey), + _ => None, + } + } +} impl Pallet { pub fn get_metagraph(netuid: u16) -> Option> { if !Self::if_subnet_exist(netuid) { @@ -291,4 +785,683 @@ impl Pallet { } metagraphs } + + pub fn get_selective_metagraph( + netuid: u16, + metagraph_indexes: Vec, + ) -> Option> { + if !Self::if_subnet_exist(netuid) { + None + } else { + let mut result = SelectiveMetagraph::default(); + for index in metagraph_indexes.iter() { + let value = Self::get_single_selective_metagraph(netuid, *index); + result.merge_value(&value, *index as usize); + } + Some(result) + } + } + + fn get_single_selective_metagraph( + netuid: u16, + metagraph_index: u16, + ) -> SelectiveMetagraph { + match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { + // Name and symbol + Some(SelectiveMetagraphIndex::Name) => SelectiveMetagraph { + netuid: netuid.into(), + name: Some( + Self::get_name_for_subnet(netuid) + .into_iter() + .map(Compact) + .collect(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Symbol) => SelectiveMetagraph { + netuid: netuid.into(), + symbol: Some( + Self::get_symbol_for_subnet(netuid) + .into_iter() + .map(Compact) + .collect(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Identity) => SelectiveMetagraph { + netuid: netuid.into(), + identity: Some(SubnetIdentitiesV2::::get(netuid)), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::NetworkRegisteredAt) => SelectiveMetagraph { + netuid: netuid.into(), + network_registered_at: Some(NetworkRegisteredAt::::get(netuid).into()), + ..Default::default() + }, + + // Keys for owner. 
+ Some(SelectiveMetagraphIndex::OwnerHotkey) => SelectiveMetagraph { + netuid: netuid.into(), + owner_hotkey: Some(SubnetOwnerHotkey::::get(netuid)), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::OwnerColdkey) => SelectiveMetagraph { + netuid: netuid.into(), + owner_coldkey: Some(SubnetOwner::::get(netuid)), + ..Default::default() + }, + + // Tempo terms. + Some(SelectiveMetagraphIndex::Block) => SelectiveMetagraph { + netuid: netuid.into(), + block: Some(Pallet::::get_current_block_as_u64().into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Tempo) => SelectiveMetagraph { + netuid: netuid.into(), + tempo: Some(Self::get_tempo(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::LastStep) => SelectiveMetagraph { + netuid: netuid.into(), + last_step: Some(LastMechansimStepBlock::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::BlocksSinceLastStep) => { + let current_block: u64 = Pallet::::get_current_block_as_u64(); + let last_step = LastMechansimStepBlock::::get(netuid); + let blocks_since_last_step: u64 = current_block.saturating_sub(last_step); + SelectiveMetagraph { + netuid: netuid.into(), + blocks_since_last_step: Some(blocks_since_last_step.into()), + ..Default::default() + } + } + + // Subnet emission terms + Some(SelectiveMetagraphIndex::SubnetEmission) => SelectiveMetagraph { + netuid: netuid.into(), + subnet_emission: Some(0.into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AlphaIn) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_in: Some(SubnetAlphaIn::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AlphaOut) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_out: Some(SubnetAlphaOut::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::TaoIn) => SelectiveMetagraph { + netuid: netuid.into(), + tao_in: Some(SubnetTAO::::get(netuid).into()), + ..Default::default() + }, + 
Some(SelectiveMetagraphIndex::AlphaOutEmission) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_out_emission: Some(SubnetAlphaOutEmission::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AlphaInEmission) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_in_emission: Some(SubnetAlphaInEmission::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::TaoInEmission) => SelectiveMetagraph { + netuid: netuid.into(), + tao_in_emission: Some(SubnetTaoInEmission::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::PendingAlphaEmission) => SelectiveMetagraph { + netuid: netuid.into(), + pending_alpha_emission: Some(PendingEmission::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::PendingRootEmission) => SelectiveMetagraph { + netuid: netuid.into(), + pending_root_emission: Some(PendingRootDivs::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::SubnetVolume) => SelectiveMetagraph { + netuid: netuid.into(), + subnet_volume: Some(SubnetVolume::::get(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MovingPrice) => SelectiveMetagraph { + netuid: netuid.into(), + moving_price: Some(SubnetMovingPrice::::get(netuid)), + ..Default::default() + }, + + // Hparams for epoch + Some(SelectiveMetagraphIndex::Rho) => SelectiveMetagraph { + netuid: netuid.into(), + rho: Some(Self::get_rho(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Kappa) => SelectiveMetagraph { + netuid: netuid.into(), + kappa: Some(Self::get_kappa(netuid).into()), + ..Default::default() + }, + + // Validator params + Some(SelectiveMetagraphIndex::MinAllowedWeights) => SelectiveMetagraph { + netuid: netuid.into(), + min_allowed_weights: Some(Self::get_min_allowed_weights(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxWeightsLimit) => SelectiveMetagraph { + 
netuid: netuid.into(), + max_weights_limit: Some(Self::get_max_weight_limit(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::WeightsVersion) => SelectiveMetagraph { + netuid: netuid.into(), + weights_version: Some(Self::get_weights_version_key(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::WeightsRateLimit) => SelectiveMetagraph { + netuid: netuid.into(), + weights_rate_limit: Some(Self::get_weights_set_rate_limit(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::ActivityCutoff) => SelectiveMetagraph { + netuid: netuid.into(), + activity_cutoff: Some(Self::get_activity_cutoff(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxValidators) => SelectiveMetagraph { + netuid: netuid.into(), + max_validators: Some(Self::get_max_allowed_validators(netuid).into()), + ..Default::default() + }, + + // Registration + Some(SelectiveMetagraphIndex::NumUids) => SelectiveMetagraph { + netuid: netuid.into(), + num_uids: Some(Self::get_subnetwork_n(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxUids) => SelectiveMetagraph { + netuid: netuid.into(), + max_uids: Some(Self::get_max_allowed_uids(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::RegistrationAllowed) => SelectiveMetagraph { + netuid: netuid.into(), + registration_allowed: Some(Self::get_network_registration_allowed(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::PowRegistrationAllowed) => SelectiveMetagraph { + netuid: netuid.into(), + pow_registration_allowed: Some( + Self::get_network_pow_registration_allowed(netuid).into(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Difficulty) => SelectiveMetagraph { + netuid: netuid.into(), + difficulty: Some(Self::get_difficulty_as_u64(netuid).into()), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Burn) => SelectiveMetagraph { + netuid: 
netuid.into(), + burn: Some(Self::get_burn_as_u64(netuid).into()), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::ImmunityPeriod) => SelectiveMetagraph { + netuid: netuid.into(), + immunity_period: Some(Self::get_immunity_period(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MinDifficulty) => SelectiveMetagraph { + netuid: netuid.into(), + min_difficulty: Some(Self::get_min_difficulty(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxDifficulty) => SelectiveMetagraph { + netuid: netuid.into(), + max_difficulty: Some(Self::get_max_difficulty(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MinBurn) => SelectiveMetagraph { + netuid: netuid.into(), + min_burn: Some(Self::get_min_burn_as_u64(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxBurn) => SelectiveMetagraph { + netuid: netuid.into(), + max_burn: Some(Self::get_max_burn_as_u64(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AdjustmentAlpha) => SelectiveMetagraph { + netuid: netuid.into(), + adjustment_alpha: Some(Self::get_adjustment_alpha(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AdjustmentInterval) => SelectiveMetagraph { + netuid: netuid.into(), + adjustment_interval: Some(Self::get_adjustment_interval(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::TargetRegsPerInterval) => SelectiveMetagraph { + netuid: netuid.into(), + target_regs_per_interval: Some( + Self::get_target_registrations_per_interval(netuid).into(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::MaxRegsPerBlock) => SelectiveMetagraph { + netuid: netuid.into(), + max_regs_per_block: Some(Self::get_max_registrations_per_block(netuid).into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::ServingRateLimit) => SelectiveMetagraph { + netuid: netuid.into(), + serving_rate_limit: 
Some(Self::get_serving_rate_limit(netuid).into()), + ..Default::default() + }, + + // CR + Some(SelectiveMetagraphIndex::CommitRevealWeightsEnabled) => SelectiveMetagraph { + netuid: netuid.into(), + commit_reveal_weights_enabled: Some(Self::get_commit_reveal_weights_enabled( + netuid, + )), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::CommitRevealPeriod) => SelectiveMetagraph { + netuid: netuid.into(), + commit_reveal_period: Some(Self::get_reveal_period(netuid).into()), + ..Default::default() + }, + + // Bonds + Some(SelectiveMetagraphIndex::LiquidAlphaEnabled) => SelectiveMetagraph { + netuid: netuid.into(), + liquid_alpha_enabled: Some(Self::get_liquid_alpha_enabled(netuid)), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AlphaHigh) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_high: Some(Self::get_alpha_values(netuid).1.into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::AlphaLow) => SelectiveMetagraph { + netuid: netuid.into(), + alpha_low: Some(Self::get_alpha_values(netuid).0.into()), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { + netuid: netuid.into(), + bonds_moving_avg: Some(Self::get_bonds_moving_average(netuid).into()), + ..Default::default() + }, + + // Metagraph info. 
+ Some(SelectiveMetagraphIndex::Hotkeys) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut hotkeys: Vec = vec![]; + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + hotkeys.push(hotkey.clone()); + } + + SelectiveMetagraph { + netuid: netuid.into(), + hotkeys: Some(hotkeys), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::Coldkeys) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut coldkeys: Vec = vec![]; + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + let coldkey = Owner::::get(hotkey.clone()); + coldkeys.push(coldkey.clone()); + } + SelectiveMetagraph { + netuid: netuid.into(), + coldkeys: Some(coldkeys), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::Identities) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut identities: Vec> = vec![]; + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + let coldkey = Owner::::get(hotkey.clone()); + identities.push(IdentitiesV2::::get(coldkey.clone())); + } + SelectiveMetagraph { + netuid: netuid.into(), + identities: Some(identities), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::Axons) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut axons: Vec = vec![]; + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + axons.push(Self::get_axon_info(netuid, &hotkey)); + } + SelectiveMetagraph { + netuid: netuid.into(), + axons: Some(axons), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::Active) => SelectiveMetagraph { + netuid: netuid.into(), + active: Some(Active::::get(netuid)), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::ValidatorPermit) => SelectiveMetagraph { + netuid: netuid.into(), + active: Some(ValidatorPermit::::get(netuid)), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::PruningScore) => SelectiveMetagraph { + netuid: netuid.into(), + pruning_score: Some( + PruningScores::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), 
+ ..Default::default() + }, + + Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { + netuid: netuid.into(), + last_update: Some( + LastUpdate::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Emission) => SelectiveMetagraph { + netuid: netuid.into(), + emission: Some( + Emission::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Dividends) => SelectiveMetagraph { + netuid: netuid.into(), + dividends: Some( + Dividends::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { + netuid: netuid.into(), + incentives: Some( + Incentive::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Consensus) => SelectiveMetagraph { + netuid: netuid.into(), + consensus: Some( + Consensus::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::Trust) => SelectiveMetagraph { + netuid: netuid.into(), + trust: Some( + Trust::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::Rank) => SelectiveMetagraph { + netuid: netuid.into(), + rank: Some( + Rank::::get(netuid) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + Some(SelectiveMetagraphIndex::BlockAtRegistration) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut block_at_registration: Vec> = vec![]; + for uid in 0..n { + block_at_registration.push(BlockAtRegistration::::get(netuid, uid).into()); + } + SelectiveMetagraph { + netuid: netuid.into(), + block_at_registration: Some(block_at_registration), + ..Default::default() + } + } + 
Some(SelectiveMetagraphIndex::AlphaStake) => { + let (_, alpha_stake_fl, _): (Vec, Vec, Vec) = + Self::get_stake_weights_for_network(netuid); + SelectiveMetagraph { + netuid: netuid.into(), + alpha_stake: Some( + alpha_stake_fl + .iter() + .map(|xi| Compact::from(fixed64_to_u64(*xi))) + .collect::>>(), + ), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::TaoStake) => { + let (_, _, tao_stake_fl): (Vec, Vec, Vec) = + Self::get_stake_weights_for_network(netuid); + SelectiveMetagraph { + netuid: netuid.into(), + tao_stake: Some( + tao_stake_fl + .iter() + .map(|xi| Compact::from(fixed64_to_u64(*xi))) + .collect::>>(), + ), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::TotalStake) => { + let (total_stake_fl, _, _): (Vec, Vec, Vec) = + Self::get_stake_weights_for_network(netuid); + SelectiveMetagraph { + netuid: netuid.into(), + total_stake: Some( + total_stake_fl + .iter() + .map(|xi| Compact::from(fixed64_to_u64(*xi))) + .collect::>>(), + ), + ..Default::default() + } + } + + // Dividend break down. 
+ Some(SelectiveMetagraphIndex::TaoDividendsPerHotkey) => { + let n: u16 = Self::get_subnetwork_n(netuid); + let mut hotkeys: Vec = vec![]; + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + hotkeys.push(hotkey.clone()); + } + let mut tao_dividends_per_hotkey: Vec<(T::AccountId, Compact)> = vec![]; + for hotkey in hotkeys.clone() { + let tao_divs = TaoDividendsPerSubnet::::get(netuid, hotkey.clone()); + tao_dividends_per_hotkey.push((hotkey.clone(), tao_divs.into())); + } + SelectiveMetagraph { + netuid: netuid.into(), + tao_dividends_per_hotkey: Some(tao_dividends_per_hotkey), + ..Default::default() + } + } + Some(SelectiveMetagraphIndex::AlphaDividendsPerHotkey) => { + let mut alpha_dividends_per_hotkey: Vec<(T::AccountId, Compact)> = vec![]; + let n: u16 = Self::get_subnetwork_n(netuid); + let mut hotkeys: Vec = vec![]; + + for uid in 0..n { + let hotkey = Keys::::get(netuid, uid); + hotkeys.push(hotkey.clone()); + } + + for hotkey in hotkeys.clone() { + let alpha_divs = AlphaDividendsPerSubnet::::get(netuid, hotkey.clone()); + alpha_dividends_per_hotkey.push((hotkey.clone(), alpha_divs.into())); + } + SelectiveMetagraph { + netuid: netuid.into(), + alpha_dividends_per_hotkey: Some(alpha_dividends_per_hotkey), + ..Default::default() + } + } + None => SelectiveMetagraph { + // Subnet index + netuid: netuid.into(), + ..Default::default() + }, + } + } +} + +#[test] +fn test_selective_metagraph() { + let mut metagraph = SelectiveMetagraph::::default(); + let expected = SelectiveMetagraph:: { + netuid: 0_u16.into(), + name: None, + symbol: None, + identity: None, + network_registered_at: None, + owner_hotkey: None, + owner_coldkey: None, + block: None, + tempo: None, + last_step: None, + blocks_since_last_step: None, + subnet_emission: None, + alpha_in: None, + alpha_out: None, + tao_in: None, + alpha_out_emission: None, + alpha_in_emission: None, + tao_in_emission: None, + pending_alpha_emission: None, + pending_root_emission: None, + subnet_volume: 
None, + moving_price: None, + rho: None, + kappa: None, + min_allowed_weights: None, + max_weights_limit: None, + weights_version: None, + weights_rate_limit: None, + activity_cutoff: None, + max_validators: None, + num_uids: None, + max_uids: None, + burn: None, + difficulty: None, + registration_allowed: None, + pow_registration_allowed: None, + immunity_period: None, + min_difficulty: None, + max_difficulty: None, + min_burn: None, + max_burn: None, + adjustment_alpha: None, + adjustment_interval: None, + target_regs_per_interval: None, + max_regs_per_block: None, + serving_rate_limit: None, + commit_reveal_weights_enabled: None, + commit_reveal_period: None, + liquid_alpha_enabled: None, + alpha_high: None, + alpha_low: None, + bonds_moving_avg: None, + hotkeys: None, + coldkeys: None, + identities: None, + axons: None, + active: None, + validator_permit: None, + pruning_score: None, + last_update: None, + emission: None, + dividends: None, + incentives: None, + consensus: None, + trust: None, + rank: None, + block_at_registration: None, + alpha_stake: None, + tao_stake: None, + total_stake: None, + tao_dividends_per_hotkey: None, + alpha_dividends_per_hotkey: None, + }; + + // test init value + assert_eq!(metagraph, expected); + + let wrong_index: usize = 100; + let metagraph_name = SelectiveMetagraph:: { + netuid: 0_u16.into(), + name: Some(vec![1_u8].into_iter().map(Compact).collect()), + ..Default::default() + }; + + // test merge function + metagraph.merge_value(&metagraph_name, wrong_index); + assert!(metagraph.name.is_none()); + + let name_index: usize = 0; + metagraph.merge_value(&metagraph_name, name_index); + assert!(metagraph.name.is_some()); + + let alph_low_index: usize = 49; + let metagraph_alpha_low = SelectiveMetagraph:: { + netuid: 0_u16.into(), + alpha_low: Some(0_u16.into()), + ..Default::default() + }; + assert!(metagraph.alpha_low.is_none()); + metagraph.merge_value(&metagraph_alpha_low, alph_low_index); + 
assert!(metagraph.alpha_low.is_some()); } diff --git a/pallets/subtensor/src/rpc_info/stake_info.rs b/pallets/subtensor/src/rpc_info/stake_info.rs index bda619596a..8a3888061f 100644 --- a/pallets/subtensor/src/rpc_info/stake_info.rs +++ b/pallets/subtensor/src/rpc_info/stake_info.rs @@ -2,6 +2,7 @@ use super::*; use frame_support::pallet_prelude::{Decode, Encode}; extern crate alloc; use codec::Compact; +use substrate_fixed::types::U96F32; #[freeze_struct("5cfb3c84c3af3116")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -112,4 +113,33 @@ impl Pallet { is_registered, }) } + + pub fn get_stake_fee( + origin: Option<(T::AccountId, u16)>, + origin_coldkey_account: T::AccountId, + destination: Option<(T::AccountId, u16)>, + destination_coldkey_account: T::AccountId, + amount: u64, + ) -> u64 { + let origin_: Option<(&T::AccountId, u16)> = + if let Some((ref origin_hotkey, origin_netuid)) = origin { + Some((origin_hotkey, origin_netuid)) + } else { + None + }; + + let destination_ = if let Some((ref destination_hotkey, destination_netuid)) = destination { + Some((destination_hotkey, destination_netuid)) + } else { + None + }; + + Self::calculate_staking_fee( + origin_, + &origin_coldkey_account, + destination_, + &destination_coldkey_account, + U96F32::saturating_from_num(amount), + ) + } } diff --git a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs index ed607eec54..ee7cde7e46 100644 --- a/pallets/subtensor/src/staking/add_stake.rs +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -1,5 +1,6 @@ use super::*; -use sp_core::Get; +use substrate_fixed::types::I96F32; +use subtensor_swap_interface::SwapHandler; impl Pallet { /// ---- The implementation for the extrinsic add_stake: Adds stake to a hotkey account. @@ -58,13 +59,18 @@ impl Pallet { )?; // 3. Ensure the remove operation from the coldkey is a success. 
- let tao_staked: u64 = - Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; + let tao_staked: I96F32 = + Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?.into(); // 4. Swap the stake into alpha on the subnet and increase counters. // Emit the staking event. - let fee = DefaultStakingFee::::get(); - Self::stake_into_subnet(&hotkey, &coldkey, netuid, tao_staked, fee); + Self::stake_into_subnet( + &hotkey, + &coldkey, + netuid, + tao_staked.saturating_to_num::(), + T::SwapInterface::max_price(), + ); // Ok and return. Ok(()) @@ -148,12 +154,20 @@ impl Pallet { } // 5. Ensure the remove operation from the coldkey is a success. - let tao_staked: u64 = Self::remove_balance_from_coldkey_account(&coldkey, possible_stake)?; + let tao_staked: I96F32 = + Self::remove_balance_from_coldkey_account(&coldkey, possible_stake)?.into(); // 6. Swap the stake into alpha on the subnet and increase counters. // Emit the staking event. let fee = DefaultStakingFee::::get(); - Self::stake_into_subnet(&hotkey, &coldkey, netuid, tao_staked, fee); + Self::stake_into_subnet( + &hotkey, + &coldkey, + netuid, + tao_staked.saturating_to_num::(), + limit_price, + fee, + ); // Ok and return. 
Ok(()) diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs index 5aff56ea28..9ee04f36a8 100644 --- a/pallets/subtensor/src/staking/helpers.rs +++ b/pallets/subtensor/src/staking/helpers.rs @@ -1,5 +1,6 @@ use super::*; -use substrate_fixed::types::I96F32; +use safe_math::*; +use substrate_fixed::types::U96F32; use frame_support::traits::{ Imbalance, @@ -46,10 +47,10 @@ impl Pallet { Self::get_all_subnet_netuids() .iter() .map(|netuid| { - let alpha: I96F32 = I96F32::saturating_from_num( + let alpha: U96F32 = U96F32::saturating_from_num( Self::get_stake_for_hotkey_on_subnet(hotkey, *netuid), ); - let tao_price: I96F32 = Self::get_alpha_price(*netuid); + let tao_price: U96F32 = Self::get_alpha_price(*netuid); alpha.saturating_mul(tao_price).saturating_to_num::() }) .sum() @@ -66,9 +67,9 @@ impl Pallet { for (netuid, _) in Alpha::::iter_prefix((hotkey, coldkey)) { let alpha_stake = Self::get_stake_for_hotkey_and_coldkey_on_subnet(hotkey, coldkey, netuid); - let tao_price: I96F32 = Self::get_alpha_price(netuid); + let tao_price: U96F32 = Self::get_alpha_price(netuid); total_stake = total_stake.saturating_add( - I96F32::saturating_from_num(alpha_stake) + U96F32::saturating_from_num(alpha_stake) .saturating_mul(tao_price) .saturating_to_num::(), ); @@ -128,10 +129,9 @@ impl Pallet { pub fn get_hotkey_take(hotkey: &T::AccountId) -> u16 { Delegates::::get(hotkey) } - pub fn get_hotkey_take_float(hotkey: &T::AccountId) -> I96F32 { - I96F32::saturating_from_num(Self::get_hotkey_take(hotkey)) - .checked_div(I96F32::saturating_from_num(u16::MAX)) - .unwrap_or(I96F32::saturating_from_num(0.0)) + pub fn get_hotkey_take_float(hotkey: &T::AccountId) -> U96F32 { + U96F32::saturating_from_num(Self::get_hotkey_take(hotkey)) + .safe_div(U96F32::saturating_from_num(u16::MAX)) } /// Returns true if the hotkey account has been created. 
diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs index ecf8fb8815..570658631a 100644 --- a/pallets/subtensor/src/staking/mod.rs +++ b/pallets/subtensor/src/staking/mod.rs @@ -5,6 +5,7 @@ pub mod decrease_take; pub mod helpers; pub mod increase_take; pub mod move_stake; +pub mod recycle_alpha; pub mod remove_stake; pub mod set_children; pub mod stake_utils; diff --git a/pallets/subtensor/src/staking/move_stake.rs b/pallets/subtensor/src/staking/move_stake.rs index 9620664b5c..68c681ef03 100644 --- a/pallets/subtensor/src/staking/move_stake.rs +++ b/pallets/subtensor/src/staking/move_stake.rs @@ -1,7 +1,8 @@ use super::*; use safe_math::*; use sp_core::Get; -use substrate_fixed::types::U64F64; +use substrate_fixed::types::{U64F64, U96F32}; +use subtensor_swap_interface::SwapHandler; impl Pallet { /// Moves stake from one hotkey to another across subnets. @@ -51,7 +52,7 @@ impl Pallet { )?; // Log the event. - log::info!( + log::debug!( "StakeMoved( coldkey:{:?}, origin_hotkey:{:?}, origin_netuid:{:?}, destination_hotkey:{:?}, destination_netuid:{:?} )", coldkey.clone(), origin_hotkey.clone(), @@ -133,7 +134,7 @@ impl Pallet { )?; // 9. Emit an event for logging/monitoring. - log::info!( + log::debug!( "StakeTransferred(origin_coldkey: {:?}, destination_coldkey: {:?}, hotkey: {:?}, origin_netuid: {:?}, destination_netuid: {:?}, amount: {:?})", coldkey, destination_coldkey, @@ -203,7 +204,7 @@ impl Pallet { )?; // Emit an event for logging. - log::info!( + log::debug!( "StakeSwapped(coldkey: {:?}, hotkey: {:?}, origin_netuid: {:?}, destination_netuid: {:?}, amount: {:?})", coldkey, hotkey, @@ -275,7 +276,7 @@ impl Pallet { )?; // Emit an event for logging. 
- log::info!( + log::debug!( "StakeSwapped(coldkey: {:?}, hotkey: {:?}, origin_netuid: {:?}, destination_netuid: {:?}, amount: {:?})", coldkey, hotkey, @@ -330,13 +331,28 @@ impl Pallet { check_transfer_toggle, )?; + // Calculate the amount that should be moved in this operation + let move_amount = if alpha_amount < max_amount { + alpha_amount + } else { + max_amount + }; + // Unstake from the origin subnet, returning TAO (or a 1:1 equivalent). - let fee = DefaultStakingFee::::get().safe_div(2); + let fee = Self::calculate_staking_fee( + Some((origin_hotkey, origin_netuid)), + origin_coldkey, + Some((destination_hotkey, destination_netuid)), + destination_coldkey, + U96F32::saturating_from_num(alpha_amount), + ) + .safe_div(2); + let tao_unstaked = Self::unstake_from_subnet( origin_hotkey, origin_coldkey, origin_netuid, - alpha_amount, + move_amount, fee, ); @@ -354,6 +370,7 @@ impl Pallet { destination_coldkey, destination_netuid, tao_unstaked, + T::SwapInterface::max_price(), fee, ); } diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs new file mode 100644 index 0000000000..b5e6762e6a --- /dev/null +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -0,0 +1,136 @@ +use super::*; +use crate::{Error, system::ensure_signed}; + +impl Pallet { + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to recycle + /// * `netuid` - The subnet ID from which to reduce AlphaOut + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub(crate) fn do_recycle_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey: T::AccountId = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + 
Error::::SubNetworkDoesNotExist + ); + + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + // Ensure that the hotkey account exists this is only possible through registration. + ensure!( + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists + ); + + // Ensure that the hotkey has enough stake to withdraw. + ensure!( + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), + Error::::NotEnoughStakeToWithdraw + ); + + ensure!( + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity + ); + + // Deduct from the coldkey's stake. + let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + + // Recycle means we should decrease the alpha issuance tracker. + SubnetAlphaOut::::mutate(netuid, |total| { + *total = total.saturating_sub(actual_alpha_decrease); + }); + + Self::deposit_event(Event::AlphaRecycled( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); + + Ok(()) + } + + /// Burns alpha from a cold/hot key pair without reducing AlphaOut + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The "up to" amount of alpha to burn + /// * `netuid` - The subnet ID + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub(crate) fn do_burn_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + // Ensure that the hotkey account exists this is only possible through registration. 
+ ensure!( + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists + ); + + // Ensure that the hotkey has enough stake to withdraw. + ensure!( + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), + Error::::NotEnoughStakeToWithdraw + ); + + ensure!( + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity + ); + + // Deduct from the coldkey's stake. + let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + + // This is a burn, so we don't need to update AlphaOut. + + // Deposit event + Self::deposit_event(Event::AlphaBurned( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); + + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index c1db7012c3..5ff490b01b 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -1,5 +1,6 @@ use super::*; -use sp_core::Get; +use substrate_fixed::types::U96F32; +use subtensor_swap_interface::SwapHandler; impl Pallet { /// ---- The implementation for the extrinsic remove_stake: Removes stake from a hotkey account and adds it onto a coldkey. @@ -39,7 +40,7 @@ impl Pallet { ) -> dispatch::DispatchResult { // 1. We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. let coldkey = ensure_signed(origin)?; - log::info!( + log::debug!( "do_remove_stake( origin:{:?} hotkey:{:?}, netuid: {:?}, alpha_unstaked:{:?} )", coldkey, hotkey, @@ -58,7 +59,13 @@ impl Pallet { )?; // 3. Swap the alpba to tao and update counters for this subnet. 
- let fee = DefaultStakingFee::::get(); + let fee = Self::calculate_staking_fee( + Some((&hotkey, netuid)), + &coldkey, + None, + &coldkey, + U96F32::saturating_from_num(alpha_unstaked), + ); let tao_unstaked: u64 = Self::unstake_from_subnet(&hotkey, &coldkey, netuid, alpha_unstaked, fee); @@ -109,11 +116,9 @@ impl Pallet { origin: T::RuntimeOrigin, hotkey: T::AccountId, ) -> dispatch::DispatchResult { - let fee = DefaultStakingFee::::get(); - // 1. We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. let coldkey = ensure_signed(origin)?; - log::info!("do_unstake_all( origin:{:?} hotkey:{:?} )", coldkey, hotkey); + log::debug!("do_unstake_all( origin:{:?} hotkey:{:?} )", coldkey, hotkey); // 2. Ensure that the hotkey account exists this is only possible through registration. ensure!( @@ -126,20 +131,43 @@ impl Pallet { log::debug!("All subnet netuids: {:?}", netuids); // 4. Iterate through all subnets and remove stake. - for netuid in netuids.iter() { + for netuid in netuids.into_iter() { // Ensure that the hotkey has enough stake to withdraw. let alpha_unstaked = - Self::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, *netuid); + Self::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + + if Self::validate_remove_stake( + &coldkey, + &hotkey, + netuid, + alpha_unstaked, + alpha_unstaked, + false, + ) + .is_err() + { + // Don't unstake from this netuid + continue; + } + + let fee = Self::calculate_staking_fee( + Some((&hotkey, netuid)), + &coldkey, + None, + &coldkey, + U96F32::saturating_from_num(alpha_unstaked), + ); + if alpha_unstaked > 0 { // Swap the alpha to tao and update counters for this subnet. let tao_unstaked: u64 = - Self::unstake_from_subnet(&hotkey, &coldkey, *netuid, alpha_unstaked, fee); + Self::unstake_from_subnet(&hotkey, &coldkey, netuid, alpha_unstaked, fee); // Add the balance to the coldkey. If the above fails we will not credit this coldkey. 
Self::add_balance_to_coldkey_account(&coldkey, tao_unstaked); // If the stake is below the minimum, we clear the nomination from storage. - Self::clear_small_nomination_if_required(&hotkey, &coldkey, *netuid); + Self::clear_small_nomination_if_required(&hotkey, &coldkey, netuid); } } @@ -177,11 +205,9 @@ impl Pallet { origin: T::RuntimeOrigin, hotkey: T::AccountId, ) -> dispatch::DispatchResult { - let fee = DefaultStakingFee::::get(); - // 1. We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. let coldkey = ensure_signed(origin)?; - log::info!("do_unstake_all( origin:{:?} hotkey:{:?} )", coldkey, hotkey); + log::debug!("do_unstake_all( origin:{:?} hotkey:{:?} )", coldkey, hotkey); // 2. Ensure that the hotkey account exists this is only possible through registration. ensure!( @@ -195,22 +221,45 @@ impl Pallet { // 4. Iterate through all subnets and remove stake. let mut total_tao_unstaked: u64 = 0; - for netuid in netuids.iter() { + for netuid in netuids.into_iter() { // If not Root network. - if *netuid != Self::get_root_netuid() { + if netuid != Self::get_root_netuid() { // Ensure that the hotkey has enough stake to withdraw. let alpha_unstaked = - Self::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, *netuid); + Self::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + + if Self::validate_remove_stake( + &coldkey, + &hotkey, + netuid, + alpha_unstaked, + alpha_unstaked, + false, + ) + .is_err() + { + // Don't unstake from this netuid + continue; + } + + let fee = Self::calculate_staking_fee( + Some((&hotkey, netuid)), + &coldkey, + None, + &coldkey, + U96F32::saturating_from_num(alpha_unstaked), + ); + if alpha_unstaked > 0 { // Swap the alpha to tao and update counters for this subnet. 
- let tao_unstaked: u64 = - Self::unstake_from_subnet(&hotkey, &coldkey, *netuid, alpha_unstaked, fee); + let tao_unstaked = + Self::unstake_from_subnet(&hotkey, &coldkey, netuid, alpha_unstaked, fee); // Increment total total_tao_unstaked = total_tao_unstaked.saturating_add(tao_unstaked); // If the stake is below the minimum, we clear the nomination from storage. - Self::clear_small_nomination_if_required(&hotkey, &coldkey, *netuid); + Self::clear_small_nomination_if_required(&hotkey, &coldkey, netuid); } } } @@ -221,7 +270,7 @@ impl Pallet { &coldkey, Self::get_root_netuid(), total_tao_unstaked, - 0, // no fee for restaking + T::SwapInterface::max_price(), ); // 5. Done and ok. @@ -276,7 +325,7 @@ impl Pallet { ) -> dispatch::DispatchResult { // 1. We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. let coldkey = ensure_signed(origin)?; - log::info!( + log::debug!( "do_remove_stake( origin:{:?} hotkey:{:?}, netuid: {:?}, alpha_unstaked:{:?} )", coldkey, hotkey, @@ -302,8 +351,14 @@ impl Pallet { )?; // 4. Swap the alpha to tao and update counters for this subnet. - let fee = DefaultStakingFee::::get(); - let tao_unstaked: u64 = + let fee = Self::calculate_staking_fee( + Some((&hotkey, netuid)), + &coldkey, + None, + &coldkey, + U96F32::saturating_from_num(alpha_unstaked), + ); + let tao_unstaked = Self::unstake_from_subnet(&hotkey, &coldkey, netuid, possible_alpha, fee); // 5. We add the balance to the coldkey. If the above fails we will not credit this coldkey. 
diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 0a3659b6ac..b11db6d531 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -2,7 +2,7 @@ use super::*; use safe_math::*; use share_pool::{SharePool, SharePoolDataOperations}; use sp_std::ops::Neg; -use substrate_fixed::types::{I64F64, I96F32, I110F18, U64F64}; +use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32, U110F18}; impl Pallet { /// Retrieves the total alpha issuance for a given subnet. @@ -30,40 +30,63 @@ impl Pallet { /// /// # Returns /// * `I96F32` - The price of alpha for the specified subnet. - pub fn get_alpha_price(netuid: u16) -> I96F32 { + pub fn get_alpha_price(netuid: u16) -> U96F32 { if netuid == Self::get_root_netuid() { - return I96F32::saturating_from_num(1.0); // Root. + return U96F32::saturating_from_num(1.0); // Root. } if SubnetMechanism::::get(netuid) == 0 { - return I96F32::saturating_from_num(1.0); // Stable + return U96F32::saturating_from_num(1.0); // Stable } if SubnetAlphaIn::::get(netuid) == 0 { - I96F32::saturating_from_num(0) + U96F32::saturating_from_num(0) } else { - I96F32::saturating_from_num(SubnetTAO::::get(netuid)) - .checked_div(I96F32::saturating_from_num(SubnetAlphaIn::::get(netuid))) - .unwrap_or(I96F32::saturating_from_num(0)) + U96F32::saturating_from_num(SubnetTAO::::get(netuid)) + .checked_div(U96F32::saturating_from_num(SubnetAlphaIn::::get(netuid))) + .unwrap_or(U96F32::saturating_from_num(0)) } } - pub fn get_moving_alpha_price(netuid: u16) -> I96F32 { + pub fn get_moving_alpha_price(netuid: u16) -> U96F32 { + let one = U96F32::saturating_from_num(1.0); if netuid == Self::get_root_netuid() { // Root. 
- I96F32::saturating_from_num(1.0) + one } else if SubnetMechanism::::get(netuid) == 0 { // Stable - I96F32::saturating_from_num(1.0) + one } else { - SubnetMovingPrice::::get(netuid) + U96F32::saturating_from_num(SubnetMovingPrice::::get(netuid)) } } pub fn update_moving_price(netuid: u16) { - let alpha: I96F32 = SubnetMovingAlpha::::get(); - let minus_alpha: I96F32 = I96F32::saturating_from_num(1.0).saturating_sub(alpha); - let current_price: I96F32 = alpha - .saturating_mul(Self::get_alpha_price(netuid).min(I96F32::saturating_from_num(1.0))); - let current_moving: I96F32 = - minus_alpha.saturating_mul(Self::get_moving_alpha_price(netuid)); - let new_moving: I96F32 = current_price.saturating_add(current_moving); + let blocks_since_start_call = U96F32::saturating_from_num({ + // We expect FirstEmissionBlockNumber to be set earlier, and we take the block when + // `start_call` was called (first block before FirstEmissionBlockNumber). + let start_call_block = FirstEmissionBlockNumber::::get(netuid) + .unwrap_or_default() + .saturating_sub(1); + + Self::get_current_block_as_u64().saturating_sub(start_call_block) + }); + + // Use halving time hyperparameter. The meaning of this parameter can be best explained under + // the assumption of a constant price and SubnetMovingAlpha == 0.5: It is how many blocks it + // will take in order for the distance between current EMA of price and current price to shorten + // by half. + let halving_time = EMAPriceHalvingBlocks::::get(netuid); + let current_ma_unsigned = U96F32::saturating_from_num(SubnetMovingAlpha::::get()); + let alpha: U96F32 = current_ma_unsigned.saturating_mul(blocks_since_start_call.safe_div( + blocks_since_start_call.saturating_add(U96F32::saturating_from_num(halving_time)), + )); + // Because alpha = b / (b + h), where b and h > 0, alpha < 1, so 1 - alpha > 0. 
+ // We can use unsigned type here: U96F32 + let one_minus_alpha: U96F32 = U96F32::saturating_from_num(1.0).saturating_sub(alpha); + let current_price: U96F32 = alpha + .saturating_mul(Self::get_alpha_price(netuid).min(U96F32::saturating_from_num(1.0))); + let current_moving: U96F32 = + one_minus_alpha.saturating_mul(Self::get_moving_alpha_price(netuid)); + // Convert back to signed I96F32 to avoid migration of SubnetMovingPrice for now + let new_moving: I96F32 = + I96F32::saturating_from_num(current_price.saturating_add(current_moving)); SubnetMovingPrice::::insert(netuid, new_moving); } @@ -71,28 +94,28 @@ impl Pallet { /// /// This function performs the following steps: /// 1. Fetches the global weight from storage using the TaoWeight storage item. - /// 2. Converts the retrieved u64 value to a fixed-point number (I96F32). + /// 2. Converts the retrieved u64 value to a fixed-point number (U96F32). /// 3. Normalizes the weight by dividing it by the maximum possible u64 value. - /// 4. Returns the normalized weight as an I96F32 fixed-point number. + /// 4. Returns the normalized weight as a U96F32 fixed-point number. /// /// The normalization ensures that the returned value is always between 0 and 1, /// regardless of the actual stored weight value. /// /// # Returns - /// * `I96F32` - The normalized global global weight as a fixed-point number between 0 and 1. + /// * `U96F32` - The normalized global weight as a fixed-point number between 0 and 1. /// /// # Note /// This function uses saturating division to prevent potential overflow errors. 
- pub fn get_tao_weight() -> I96F32 { + pub fn get_tao_weight() -> U96F32 { // Step 1: Fetch the global weight from storage let stored_weight = TaoWeight::::get(); - // Step 2: Convert the u64 weight to I96F32 - let weight_fixed = I96F32::saturating_from_num(stored_weight); + // Step 2: Convert the u64 weight to U96F32 + let weight_fixed = U96F32::saturating_from_num(stored_weight); // Step 3: Normalize the weight by dividing by u64::MAX // This ensures the result is always between 0 and 1 - weight_fixed.safe_div(I96F32::saturating_from_num(u64::MAX)) + weight_fixed.safe_div(U96F32::saturating_from_num(u64::MAX)) } /// Sets the global global weight in storage. @@ -225,13 +248,13 @@ impl Pallet { /// # Note /// This function uses saturating arithmetic to prevent overflows. pub fn get_tao_inherited_for_hotkey_on_subnet(hotkey: &T::AccountId, netuid: u16) -> u64 { - let initial_tao: I96F32 = I96F32::saturating_from_num( + let initial_tao: U96F32 = U96F32::saturating_from_num( Self::get_stake_for_hotkey_on_subnet(hotkey, Self::get_root_netuid()), ); // Initialize variables to track alpha allocated to children and inherited from parents. - let mut tao_to_children: I96F32 = I96F32::saturating_from_num(0); - let mut tao_from_parents: I96F32 = I96F32::saturating_from_num(0); + let mut tao_to_children: U96F32 = U96F32::saturating_from_num(0); + let mut tao_from_parents: U96F32 = U96F32::saturating_from_num(0); // Step 2: Retrieve the lists of parents and children for the hotkey on the subnet. let parents: Vec<(u64, T::AccountId)> = Self::get_parents(hotkey, netuid); @@ -252,16 +275,16 @@ impl Pallet { // Step 3: Calculate the total tao allocated to children. for (proportion, _) in children { // Convert the proportion to a normalized value between 0 and 1. 
- let normalized_proportion: I96F32 = I96F32::saturating_from_num(proportion) - .safe_div(I96F32::saturating_from_num(u64::MAX)); + let normalized_proportion: U96F32 = U96F32::saturating_from_num(proportion) + .safe_div(U96F32::saturating_from_num(u64::MAX)); log::trace!( "Normalized proportion for child: {:?}", normalized_proportion ); // Calculate the amount of tao to be allocated to this child. - let tao_proportion_to_child: I96F32 = - I96F32::saturating_from_num(initial_tao).saturating_mul(normalized_proportion); + let tao_proportion_to_child: U96F32 = + U96F32::saturating_from_num(initial_tao).saturating_mul(normalized_proportion); log::trace!("Tao proportion to child: {:?}", tao_proportion_to_child); // Add this child's allocation to the total tao allocated to children. @@ -272,7 +295,7 @@ impl Pallet { // Step 4: Calculate the total tao inherited from parents. for (proportion, parent) in parents { // Retrieve the parent's total stake on this subnet. - let parent_tao: I96F32 = I96F32::saturating_from_num( + let parent_tao: U96F32 = U96F32::saturating_from_num( Self::get_stake_for_hotkey_on_subnet(&parent, Self::get_root_netuid()), ); log::trace!( @@ -283,16 +306,16 @@ impl Pallet { ); // Convert the proportion to a normalized value between 0 and 1. - let normalized_proportion: I96F32 = I96F32::saturating_from_num(proportion) - .safe_div(I96F32::saturating_from_num(u64::MAX)); + let normalized_proportion: U96F32 = U96F32::saturating_from_num(proportion) + .safe_div(U96F32::saturating_from_num(u64::MAX)); log::trace!( "Normalized proportion from parent: {:?}", normalized_proportion ); // Calculate the amount of tao to be inherited from this parent. 
- let tao_proportion_from_parent: I96F32 = - I96F32::saturating_from_num(parent_tao).saturating_mul(normalized_proportion); + let tao_proportion_from_parent: U96F32 = + U96F32::saturating_from_num(parent_tao).saturating_mul(normalized_proportion); log::trace!( "Tao proportion from parent: {:?}", tao_proportion_from_parent @@ -304,7 +327,7 @@ impl Pallet { log::trace!("Total tao inherited from parents: {:?}", tao_from_parents); // Step 5: Calculate the final inherited tao for the hotkey. - let finalized_tao: I96F32 = initial_tao + let finalized_tao: U96F32 = initial_tao .saturating_sub(tao_to_children) // Subtract tao allocated to children .saturating_add(tao_from_parents); // Add tao inherited from parents log::trace!( @@ -320,8 +343,8 @@ impl Pallet { pub fn get_inherited_for_hotkey_on_subnet(hotkey: &T::AccountId, netuid: u16) -> u64 { // Step 1: Retrieve the initial total stake (alpha) for the hotkey on the specified subnet. - let initial_alpha: I96F32 = - I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); + let initial_alpha: U96F32 = + U96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); log::debug!( "Initial alpha for hotkey {:?} on subnet {}: {:?}", hotkey, @@ -333,8 +356,8 @@ impl Pallet { } // Initialize variables to track alpha allocated to children and inherited from parents. - let mut alpha_to_children: I96F32 = I96F32::saturating_from_num(0); - let mut alpha_from_parents: I96F32 = I96F32::saturating_from_num(0); + let mut alpha_to_children: U96F32 = U96F32::saturating_from_num(0); + let mut alpha_from_parents: U96F32 = U96F32::saturating_from_num(0); // Step 2: Retrieve the lists of parents and children for the hotkey on the subnet. let parents: Vec<(u64, T::AccountId)> = Self::get_parents(hotkey, netuid); @@ -355,16 +378,16 @@ impl Pallet { // Step 3: Calculate the total alpha allocated to children. 
for (proportion, _) in children { // Convert the proportion to a normalized value between 0 and 1. - let normalized_proportion: I96F32 = I96F32::saturating_from_num(proportion) - .safe_div(I96F32::saturating_from_num(u64::MAX)); + let normalized_proportion: U96F32 = U96F32::saturating_from_num(proportion) + .safe_div(U96F32::saturating_from_num(u64::MAX)); log::trace!( "Normalized proportion for child: {:?}", normalized_proportion ); // Calculate the amount of alpha to be allocated to this child. - let alpha_proportion_to_child: I96F32 = - I96F32::saturating_from_num(initial_alpha).saturating_mul(normalized_proportion); + let alpha_proportion_to_child: U96F32 = + U96F32::saturating_from_num(initial_alpha).saturating_mul(normalized_proportion); log::trace!("Alpha proportion to child: {:?}", alpha_proportion_to_child); // Add this child's allocation to the total alpha allocated to children. @@ -375,8 +398,8 @@ impl Pallet { // Step 4: Calculate the total alpha inherited from parents. for (proportion, parent) in parents { // Retrieve the parent's total stake on this subnet. - let parent_alpha: I96F32 = - I96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(&parent, netuid)); + let parent_alpha: U96F32 = + U96F32::saturating_from_num(Self::get_stake_for_hotkey_on_subnet(&parent, netuid)); log::trace!( "Parent alpha for parent {:?} on subnet {}: {:?}", parent, @@ -385,16 +408,16 @@ impl Pallet { ); // Convert the proportion to a normalized value between 0 and 1. - let normalized_proportion: I96F32 = I96F32::saturating_from_num(proportion) - .safe_div(I96F32::saturating_from_num(u64::MAX)); + let normalized_proportion: U96F32 = U96F32::saturating_from_num(proportion) + .safe_div(U96F32::saturating_from_num(u64::MAX)); log::trace!( "Normalized proportion from parent: {:?}", normalized_proportion ); // Calculate the amount of alpha to be inherited from this parent. 
- let alpha_proportion_from_parent: I96F32 = - I96F32::saturating_from_num(parent_alpha).saturating_mul(normalized_proportion); + let alpha_proportion_from_parent: U96F32 = + U96F32::saturating_from_num(parent_alpha).saturating_mul(normalized_proportion); log::trace!( "Alpha proportion from parent: {:?}", alpha_proportion_from_parent @@ -409,7 +432,7 @@ impl Pallet { ); // Step 5: Calculate the final inherited alpha for the hotkey. - let finalized_alpha: I96F32 = initial_alpha + let finalized_alpha: U96F32 = initial_alpha .saturating_sub(alpha_to_children) // Subtract alpha allocated to children .saturating_add(alpha_from_parents); // Add alpha inherited from parents log::trace!( @@ -546,8 +569,12 @@ impl Pallet { amount: u64, ) -> u64 { let mut alpha_share_pool = Self::get_alpha_share_pool(hotkey.clone(), netuid); + // We expect to add a positive amount here. let actual_alpha = alpha_share_pool.update_value_for_one(coldkey, amount as i64); - actual_alpha.unsigned_abs() + + // We should return a positive amount, or 0 if the operation failed. + // e.g. the stake was removed due to precision issues. + actual_alpha.max(0).unsigned_abs() } pub fn try_increase_stake_for_hotkey_and_coldkey_on_subnet( @@ -576,6 +603,8 @@ impl Pallet { amount: u64, ) -> u64 { let mut alpha_share_pool = Self::get_alpha_share_pool(hotkey.clone(), netuid); + + // We expect a negative value here let mut actual_alpha = 0; if let Ok(value) = alpha_share_pool.try_get_value(coldkey) { if value >= amount { @@ -583,7 +612,11 @@ impl Pallet { alpha_share_pool.update_value_for_one(coldkey, (amount as i64).neg()); } } - actual_alpha.unsigned_abs() + + // Get the negation of the removed alpha, and clamp at 0. + // This ensures we return a positive value, but only if + // `actual_alpha` was negative (i.e. a decrease in stake). + actual_alpha.neg().max(0).unsigned_abs() } /// Calculates Some(Alpha) returned from pool by staking operation @@ -598,15 +631,15 @@ impl Pallet { // Step 2: Initialized vars. 
if mechanism_id == 1 { // Step 3.a.1: Dynamic mechanism calculations - let tao_reserves: I110F18 = I110F18::saturating_from_num(SubnetTAO::::get(netuid)); - let alpha_reserves: I110F18 = - I110F18::saturating_from_num(SubnetAlphaIn::::get(netuid)); + let tao_reserves: U110F18 = U110F18::saturating_from_num(SubnetTAO::::get(netuid)); + let alpha_reserves: U110F18 = + U110F18::saturating_from_num(SubnetAlphaIn::::get(netuid)); // Step 3.a.2: Compute constant product k = alpha * tao - let k: I110F18 = alpha_reserves.saturating_mul(tao_reserves); + let k: U110F18 = alpha_reserves.saturating_mul(tao_reserves); // Calculate new alpha reserve - let new_alpha_reserves: I110F18 = - k.safe_div(tao_reserves.saturating_add(I110F18::saturating_from_num(tao))); + let new_alpha_reserves: U110F18 = + k.safe_div(tao_reserves.saturating_add(U110F18::saturating_from_num(tao))); // Step 3.a.3: Calculate alpha staked using the constant product formula // alpha_stake_recieved = current_alpha - (k / (current_tao + new_tao)) @@ -637,16 +670,16 @@ impl Pallet { // Step 2: Swap alpha and attain tao if mechanism_id == 1 { // Step 3.a.1: Dynamic mechanism calculations - let tao_reserves: I110F18 = I110F18::saturating_from_num(SubnetTAO::::get(netuid)); - let alpha_reserves: I110F18 = - I110F18::saturating_from_num(SubnetAlphaIn::::get(netuid)); + let tao_reserves: U110F18 = U110F18::saturating_from_num(SubnetTAO::::get(netuid)); + let alpha_reserves: U110F18 = + U110F18::saturating_from_num(SubnetAlphaIn::::get(netuid)); // Step 3.a.2: Compute constant product k = alpha * tao - let k: I110F18 = alpha_reserves.saturating_mul(tao_reserves); + let k: U110F18 = alpha_reserves.saturating_mul(tao_reserves); // Calculate new tao reserve - let new_tao_reserves: I110F18 = k - .checked_div(alpha_reserves.saturating_add(I110F18::saturating_from_num(alpha))) - .unwrap_or(I110F18::saturating_from_num(0)); + let new_tao_reserves: U110F18 = k + 
.checked_div(alpha_reserves.saturating_add(U110F18::saturating_from_num(alpha))) + .unwrap_or(U110F18::saturating_from_num(0)); // Step 3.a.3: Calculate alpha staked using the constant product formula // tao_recieved = tao_reserves - (k / (alpha_reserves + new_tao)) @@ -772,14 +805,16 @@ impl Pallet { tao_unstaked, actual_alpha_decrease, netuid, + actual_fee, )); - log::info!( - "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + log::debug!( + "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_unstaked, actual_alpha_decrease, - netuid + netuid, + actual_fee ); // Step 6: Return the amount of TAO unstaked. @@ -789,21 +824,15 @@ impl Pallet { /// Stakes TAO into a subnet for a given hotkey and coldkey pair. /// /// We update the pools associated with a subnet as well as update hotkey alpha shares. - pub fn stake_into_subnet( + pub(crate) fn stake_into_subnet( hotkey: &T::AccountId, coldkey: &T::AccountId, netuid: u16, tao: u64, - fee: u64, + price_limit: u64, ) -> u64 { - // Step 1. Reduce tao amount by staking fee and credit this fee to SubnetTAO - // At this point tao was already withdrawn from the user balance and is considered - // available - let tao_staked = tao.saturating_sub(fee); - let actual_fee = tao.saturating_sub(tao_staked); - // Step 2. Swap the tao to alpha. - let alpha: u64 = Self::swap_tao_for_alpha(netuid, tao_staked); + let alpha = Self::swap_tao_for_alpha(netuid, tao_staked); let mut actual_alpha = 0; if (tao_staked > 0) && (alpha > 0) { // Step 3: Increase the alpha on the hotkey account. 
@@ -835,14 +864,16 @@ impl Pallet { tao_staked, actual_alpha, netuid, + actual_fee, )); - log::info!( - "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + log::debug!( + "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_staked, actual_alpha, - netuid + netuid, + actual_fee ); // Step 7: Return the amount of alpha staked @@ -1049,6 +1080,70 @@ impl Pallet { Ok(()) } + + pub(crate) fn calculate_staking_fee( + origin: Option<(&T::AccountId, u16)>, + _origin_coldkey: &T::AccountId, + destination: Option<(&T::AccountId, u16)>, + _destination_coldkey: &T::AccountId, + alpha_estimate: U96F32, + ) -> u64 { + match origin { + // If origin is defined, we are removing/moving stake + Some((origin_hotkey, origin_netuid)) => { + if let Some((_destination_hotkey, destination_netuid)) = destination { + // This is a stake move/swap/transfer + if destination_netuid == origin_netuid { + // If destination is on the same subnet, use the default fee + return DefaultStakingFee::::get(); + } + } + + if origin_netuid == Self::get_root_netuid() + || SubnetMechanism::::get(origin_netuid) == 0 + { + // If the origin netuid is root, or the subnet mechanism is 0, use the default fee + DefaultStakingFee::::get() + } else { + // Otherwise, calculate the fee based on the alpha estimate + // Here we are using TotalHotkeyAlphaLastEpoch, which is exactly the value that + // was used to calculate AlphaDividendsPerSubnet + let tao_estimate = U96F32::saturating_from_num( + Self::sim_swap_alpha_for_tao( + origin_netuid, + alpha_estimate.saturating_to_num::(), + ) + .unwrap_or(0), + ); + let mut fee = tao_estimate + .saturating_mul( + U96F32::saturating_from_num(AlphaDividendsPerSubnet::::get( + origin_netuid, + &origin_hotkey, + )) + .safe_div(U96F32::saturating_from_num( + TotalHotkeyAlphaLastEpoch::::get(&origin_hotkey, origin_netuid), + )), + ) + .saturating_to_num::(); + + // 0.005% per 
epoch matches to 44% annual in compound interest. Do not allow the fee + // to be lower than that. (1.00005^(365*20) ~= 1.44) + let apr_20_percent = U96F32::saturating_from_num(0.00005); + fee = fee.max( + tao_estimate + .saturating_mul(apr_20_percent) + .saturating_to_num::(), + ); + + // We should at least get DefaultStakingFee anyway + fee.max(DefaultStakingFee::::get()) + } + } + // If origin is not defined, we are adding stake; use default fee + None => DefaultStakingFee::::get(), + } + } } /////////////////////////////////////////// diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index 121f38b338..5c698c1b33 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -20,7 +20,7 @@ impl Pallet { // Expand subnetwork with new account. Self::append_neuron(netuid, hotkey, block_number); - log::info!("add new neuron account"); + log::debug!("add new neuron account"); } else { // Replacement required. // We take the neuron with the lowest pruning score here. @@ -28,7 +28,7 @@ impl Pallet { // Replace the neuron account with the new info. Self::replace_neuron(netuid, neuron_uid, hotkey, block_number); - log::info!("prune neuron"); + log::debug!("prune neuron"); } // Return the UID of the neuron. diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 87704c558a..e4721c03f5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -217,6 +217,11 @@ impl Pallet { Self::burn_tokens(actual_tao_lock_amount_less_pool_tao); } + if actual_tao_lock_amount > 0 && pool_initial_tao > 0 { + // Record in TotalStake the initial TAO in the pool. + Self::increase_total_stake(pool_initial_tao); + } + // --- 15. 
Add the identity if it exists if let Some(identity_value) = identity { ensure!( @@ -234,7 +239,7 @@ impl Pallet { netuid_to_register, mechid ); - Self::deposit_event(Event::NetworkAdded(netuid_to_register, 0)); + Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); // --- 17. Return success. Ok(()) @@ -267,7 +272,6 @@ impl Pallet { Self::set_target_registrations_per_interval(netuid, 1); Self::set_adjustment_alpha(netuid, 17_893_341_751_498_265_066); // 18_446_744_073_709_551_615 * 0.97 = 17_893_341_751_498_265_066 Self::set_immunity_period(netuid, 5000); - Self::set_min_burn(netuid, 1); Self::set_min_difficulty(netuid, u64::MAX); Self::set_max_difficulty(netuid, u64::MAX); @@ -315,4 +319,58 @@ impl Pallet { ); } } + + /// Execute the start call for a subnet. + /// + /// This function is used to trigger the start call process for a subnet identified by `netuid`. + /// It ensures that the subnet exists, the caller is the subnet owner, + /// and the last emission block number has not been set yet. + /// It then sets the last emission block number to the current block number. + /// + /// # Parameters + /// + /// * `origin`: The origin of the call, which is used to ensure the caller is the subnet owner. + /// * `netuid`: The unique identifier of the subnet for which the start call process is being initiated. + /// + /// # Raises + /// + /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. + /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. + /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. + /// + /// # Returns + /// + /// * `DispatchResult`: A result indicating the success or failure of the operation. 
+ pub fn do_start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + Self::ensure_subnet_owner(origin, netuid)?; + ensure!( + FirstEmissionBlockNumber::::get(netuid).is_none(), + Error::::FirstEmissionBlockNumberAlreadySet + ); + + let registration_block_number = NetworkRegisteredAt::::get(netuid); + let current_block_number = Self::get_current_block_as_u64(); + + ensure!( + current_block_number + >= registration_block_number.saturating_add(T::DurationOfStartCall::get()), + Error::::NeedWaitingMoreBlocksToStarCall + ); + let next_block_number = current_block_number.saturating_add(1); + + FirstEmissionBlockNumber::::insert(netuid, next_block_number); + Self::deposit_event(Event::FirstEmissionBlockNumberSet( + netuid, + next_block_number, + )); + Ok(()) + } + + pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { + FirstEmissionBlockNumber::::get(netuid).is_some() + } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index aaf3b5fe6b..c97252677c 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -23,6 +23,7 @@ impl Pallet { Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. 
diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index a122e7b985..06a6e4be03 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -1092,6 +1092,7 @@ impl Pallet { pub fn set_reveal_period(netuid: u16, reveal_period: u64) { RevealPeriodEpochs::::insert(netuid, reveal_period); + Self::deposit_event(Event::CommitRevealPeriodsSet(netuid, reveal_period)); } pub fn get_reveal_period(netuid: u16) -> u64 { RevealPeriodEpochs::::get(netuid) diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index 74070faaca..54c7c01d8e 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -158,56 +158,63 @@ impl Pallet { OwnedHotkeys::::insert(coldkey, hotkeys); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - // 3. Swap total hotkey alpha for all subnets. + // 3. Swap total hotkey alpha for all subnets it exists on. // TotalHotkeyAlpha( hotkey, netuid ) -> alpha -- the total alpha that the hotkey has on a specific subnet. - let all_netuids: Vec = Self::get_all_subnet_netuids(); - for netuid in all_netuids { - let old_total_hotkey_alpha = TotalHotkeyAlpha::::get(old_hotkey, netuid); - let new_total_hotkey_alpha = TotalHotkeyAlpha::::get(new_hotkey, netuid); - TotalHotkeyAlpha::::remove(old_hotkey, netuid); - TotalHotkeyAlpha::::insert( - new_hotkey, - netuid, - old_total_hotkey_alpha.saturating_add(new_total_hotkey_alpha), - ); - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - - // 4. 
Swap total hotkey shares on all subnets + TotalHotkeyAlpha::::iter_prefix(old_hotkey) + .drain() + .for_each(|(netuid, old_alpha)| { + let new_total_hotkey_alpha = TotalHotkeyAlpha::::get(new_hotkey, netuid); + TotalHotkeyAlpha::::insert( + new_hotkey, + netuid, + old_alpha.saturating_add(new_total_hotkey_alpha), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + }); + + // 4. Swap total hotkey shares on all subnets it exists on. // TotalHotkeyShares( hotkey, netuid ) -> alpha -- the total alpha that the hotkey has on a specific subnet. - let all_netuids: Vec = Self::get_all_subnet_netuids(); - for netuid in all_netuids { - let old_total_hotkey_shares = TotalHotkeyShares::::get(old_hotkey, netuid); - let new_total_hotkey_shares = TotalHotkeyShares::::get(new_hotkey, netuid); - TotalHotkeyShares::::remove(old_hotkey, netuid); - TotalHotkeyShares::::insert( - new_hotkey, - netuid, - old_total_hotkey_shares.saturating_add(new_total_hotkey_shares), - ); - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } + TotalHotkeyShares::::iter_prefix(old_hotkey) + .drain() + .for_each(|(netuid, old_shares)| { + let new_total_hotkey_shares = TotalHotkeyShares::::get(new_hotkey, netuid); + TotalHotkeyShares::::insert( + new_hotkey, + netuid, + old_shares.saturating_add(new_total_hotkey_shares), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + }); // 5. Swap LastTxBlock // LastTxBlock( hotkey ) --> u64 -- the last transaction block for the hotkey. + let last_tx_block: u64 = LastTxBlock::::get(old_hotkey); LastTxBlock::::remove(old_hotkey); - LastTxBlock::::insert(new_hotkey, Self::get_current_block_as_u64()); - weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 2)); + LastTxBlock::::insert(new_hotkey, last_tx_block); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); // 6. 
Swap LastTxBlockDelegateTake // LastTxBlockDelegateTake( hotkey ) --> u64 -- the last transaction block for the hotkey delegate take. + let last_tx_block_delegate_take: u64 = LastTxBlockDelegateTake::::get(old_hotkey); LastTxBlockDelegateTake::::remove(old_hotkey); - LastTxBlockDelegateTake::::insert(new_hotkey, Self::get_current_block_as_u64()); + LastTxBlockDelegateTake::::insert(new_hotkey, last_tx_block_delegate_take); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - // 7. Swap Senate members. + // 7. Swap LastTxBlockChildKeyTake + // LastTxBlockChildKeyTake( hotkey ) --> u64 -- the last transaction block for the hotkey child key take. + let last_tx_block_child_key_take: u64 = LastTxBlockChildKeyTake::::get(old_hotkey); + LastTxBlockChildKeyTake::::remove(old_hotkey); + LastTxBlockChildKeyTake::::insert(new_hotkey, last_tx_block_child_key_take); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + // 8. Swap Senate members. // Senate( hotkey ) --> ? if T::SenateMembers::is_member(old_hotkey) { T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } - // 8. Swap delegates. + // 9. Swap delegates. // Delegates( hotkey ) -> take value -- the hotkey delegate take value. if Delegates::::contains_key(old_hotkey) { let old_delegate_take = Delegates::::get(old_hotkey); @@ -427,6 +434,45 @@ impl Pallet { } } + // 16. 
Swap dividend records + TotalHotkeyAlphaLastEpoch::::iter_prefix(old_hotkey) + .drain() + .for_each(|(netuid, old_alpha)| { + // 16.1 Swap TotalHotkeyAlphaLastEpoch + let new_total_hotkey_alpha = + TotalHotkeyAlphaLastEpoch::::get(new_hotkey, netuid); + TotalHotkeyAlphaLastEpoch::::insert( + new_hotkey, + netuid, + old_alpha.saturating_add(new_total_hotkey_alpha), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 16.2 Swap AlphaDividendsPerSubnet + let old_hotkey_alpha_dividends = + AlphaDividendsPerSubnet::::get(netuid, old_hotkey); + let new_hotkey_alpha_dividends = + AlphaDividendsPerSubnet::::get(netuid, new_hotkey); + AlphaDividendsPerSubnet::::remove(netuid, old_hotkey); + AlphaDividendsPerSubnet::::insert( + netuid, + new_hotkey, + old_hotkey_alpha_dividends.saturating_add(new_hotkey_alpha_dividends), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 16.3 Swap TaoDividendsPerSubnet + let old_hotkey_tao_dividends = TaoDividendsPerSubnet::::get(netuid, old_hotkey); + let new_hotkey_tao_dividends = TaoDividendsPerSubnet::::get(netuid, new_hotkey); + TaoDividendsPerSubnet::::remove(netuid, old_hotkey); + TaoDividendsPerSubnet::::insert( + netuid, + new_hotkey, + old_hotkey_tao_dividends.saturating_add(new_hotkey_tao_dividends), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + }); + // Return successful after swapping all the relevant terms. 
Ok(()) } diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 9091e0e431..34c7b5459b 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -4,7 +4,7 @@ use super::mock::*; use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; -use substrate_fixed::types::{I64F64, I96F32}; +use substrate_fixed::types::{I64F64, I96F32, U96F32}; use crate::{utils::rate_limiting::TransactionType, *}; use sp_core::U256; @@ -961,7 +961,7 @@ fn test_childkey_take_rate_limiting() { last_block, limit, passes, - current_block.saturating_sub(last_block) + current_block - last_block ); }; @@ -2764,9 +2764,7 @@ fn test_set_weights_no_parent() { // Set a minimum stake to set weights SubtensorModule::set_stake_threshold( - curr_stake_weight - .saturating_sub(I64F64::saturating_from_num(5)) - .saturating_to_num::(), + (curr_stake_weight - I64F64::from_num(5)).to_num::(), ); // Check if the stake for the hotkey is above @@ -3029,7 +3027,7 @@ fn test_parent_child_chain_emission() { // Set the weight of root TAO to be 0%, so only alpha is effective. 
SubtensorModule::set_tao_weight(0); - let emission: I96F32 = I96F32::from_num(SubtensorModule::get_block_emission().unwrap_or(0)); + let emission: U96F32 = U96F32::from_num(SubtensorModule::get_block_emission().unwrap_or(0)); // Set pending emission to 0 PendingEmission::::insert(netuid, 0); @@ -3382,17 +3380,17 @@ fn test_dividend_distribution_with_children() { "C should have pending emission of 1/9 of total emission" ); - let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_c = SubtensorModule::get_dividends_distribution( + let dividends_c = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_c, netuid, hardcoded_emission.saturating_to_num::(), @@ -3883,12 +3881,12 @@ fn test_dividend_distribution_with_children_same_coldkey_owner() { ); // Get the distribution of dividends including the Parent/Child relationship. 
- let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), @@ -3949,3 +3947,44 @@ fn test_dividend_distribution_with_children_same_coldkey_owner() { ); }); } + +#[test] +fn test_pending_cooldown_one_day() { + let curr_block = 1; + + let expected_cooldown = if cfg!(feature = "fast-blocks") { + 15 + } else { + 7_200 + }; + + new_test_ext(curr_block).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + let proportion1: u64 = 1000; + let proportion2: u64 = 2000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set multiple children + mock_schedule_children( + &coldkey, + &hotkey, + netuid, + &[(proportion1, child1), (proportion2, child2)], + ); + + // Verify pending map + let pending_children = PendingChildKeys::::get(netuid, hotkey); + assert_eq!( + pending_children.0, + vec![(proportion1, child1), (proportion2, child2)] + ); + assert_eq!(pending_children.1, curr_block + expected_cooldown); + }); +} diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 2deea02f93..1345f36b7d 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -2,11 +2,11 @@ use super::mock::*; use crate::*; +use alloc::collections::BTreeMap; use approx::assert_abs_diff_eq; use frame_support::assert_ok; use sp_core::U256; -use substrate_fixed::types::I64F64; -use substrate_fixed::types::I96F32; +use substrate_fixed::types::{I64F64, I96F32, U96F32}; 
#[allow(clippy::arithmetic_side_effects)] fn close(value: u64, target: u64, eps: u64) { @@ -73,7 +73,7 @@ fn test_dynamic_function_various_values() { #[test] fn test_coinbase_basecase() { new_test_ext(1).execute_with(|| { - SubtensorModule::run_coinbase(I96F32::from_num(0.0)); + SubtensorModule::run_coinbase(U96F32::from_num(0.0)); }); } @@ -90,7 +90,7 @@ fn test_coinbase_tao_issuance_base() { let emission: u64 = 1_234_567; add_network(netuid, 1, 0); assert_eq!(SubnetTAO::::get(netuid), 0); - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); assert_eq!(SubnetTAO::::get(netuid), emission); assert_eq!(TotalIssuance::::get(), emission); assert_eq!(TotalStake::::get(), emission); @@ -105,7 +105,7 @@ fn test_coinbase_tao_issuance_base_low() { let emission: u64 = 1; add_network(netuid, 1, 0); assert_eq!(SubnetTAO::::get(netuid), 0); - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); assert_eq!(SubnetTAO::::get(netuid), emission); assert_eq!(TotalIssuance::::get(), emission); assert_eq!(TotalStake::::get(), emission); @@ -132,7 +132,7 @@ fn test_coinbase_tao_issuance_multiple() { assert_eq!(SubnetTAO::::get(netuid1), 0); assert_eq!(SubnetTAO::::get(netuid2), 0); assert_eq!(SubnetTAO::::get(netuid3), 0); - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); assert_eq!(SubnetTAO::::get(netuid1), emission / 3); assert_eq!(SubnetTAO::::get(netuid2), emission / 3); assert_eq!(SubnetTAO::::get(netuid3), emission / 3); @@ -165,7 +165,7 @@ fn test_coinbase_tao_issuance_different_prices() { assert_eq!(SubnetTAO::::get(netuid1), 0); assert_eq!(SubnetTAO::::get(netuid2), 0); // Run the coinbase with the emission amount. 
- SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); // Assert tao emission is split evenly. assert_eq!(SubnetTAO::::get(netuid1), emission / 3); assert_eq!(SubnetTAO::::get(netuid2), emission / 3 + emission / 3); @@ -190,11 +190,16 @@ fn test_coinbase_moving_prices() { SubnetAlphaIn::::insert(netuid, 1_000_000); SubnetMechanism::::insert(netuid, 1); SubnetMovingPrice::::insert(netuid, I96F32::from_num(1)); + FirstEmissionBlockNumber::::insert(netuid, 1); + // Updating the moving price keeps it the same. assert_eq!( SubtensorModule::get_moving_alpha_price(netuid), I96F32::from_num(1) ); + // Skip some blocks so that EMA price is not slowed down + System::set_block_number(7_200_000); + SubtensorModule::update_moving_price(netuid); assert_eq!( SubtensorModule::get_moving_alpha_price(netuid), @@ -206,29 +211,79 @@ fn test_coinbase_moving_prices() { SubnetMovingAlpha::::set(I96F32::from_num(1.0)); // Run moving 1 times. SubtensorModule::update_moving_price(netuid); - // Assert price is == 100% of the real price. - assert_eq!( - SubtensorModule::get_moving_alpha_price(netuid), - I96F32::from_num(1.0) - ); + // Assert price is ~ 100% of the real price. + assert!(U96F32::from_num(1.0) - SubtensorModule::get_moving_alpha_price(netuid) < 0.05); // Set price to zero. SubnetMovingPrice::::insert(netuid, I96F32::from_num(0)); SubnetMovingAlpha::::set(I96F32::from_num(0.1)); - // Run moving 6 times. - SubtensorModule::update_moving_price(netuid); - SubtensorModule::update_moving_price(netuid); - SubtensorModule::update_moving_price(netuid); - SubtensorModule::update_moving_price(netuid); - SubtensorModule::update_moving_price(netuid); - SubtensorModule::update_moving_price(netuid); + + // EMA price 28 days after registration + System::set_block_number(7_200 * 28); + + // Run moving 14 times. + for _ in 0..14 { + SubtensorModule::update_moving_price(netuid); + } + // Assert price is > 50% of the real price. 
- assert_eq!( - SubtensorModule::get_moving_alpha_price(netuid), - I96F32::from_num(0.468559) + assert_abs_diff_eq!( + 0.512325, + SubtensorModule::get_moving_alpha_price(netuid).to_num::(), + epsilon = 0.001 ); }); } +// Test moving price updates slow down at the beginning. +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_update_moving_price_initial --exact --show-output --nocapture +#[test] +fn test_update_moving_price_initial() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + add_network(netuid, 1, 0); + // Set current price to 1.0 + SubnetTAO::::insert(netuid, 1_000_000); + SubnetAlphaIn::::insert(netuid, 1_000_000); + SubnetMechanism::::insert(netuid, 1); + SubnetMovingAlpha::::set(I96F32::from_num(0.5)); + SubnetMovingPrice::::insert(netuid, I96F32::from_num(0)); + + // Registered recently + System::set_block_number(510); + FirstEmissionBlockNumber::::insert(netuid, 500); + + SubtensorModule::update_moving_price(netuid); + + let new_price = SubnetMovingPrice::::get(netuid); + assert!(new_price.to_num::() < 0.001); + }); +} + +// Test moving price updates slow down at the beginning. 
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_update_moving_price_after_time --exact --show-output --nocapture +#[test] +fn test_update_moving_price_after_time() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + add_network(netuid, 1, 0); + // Set current price to 1.0 + SubnetTAO::::insert(netuid, 1_000_000); + SubnetAlphaIn::::insert(netuid, 1_000_000); + SubnetMechanism::::insert(netuid, 1); + SubnetMovingAlpha::::set(I96F32::from_num(0.5)); + SubnetMovingPrice::::insert(netuid, I96F32::from_num(0)); + + // Registered long time ago + System::set_block_number(144_000_500); + FirstEmissionBlockNumber::::insert(netuid, 500); + + SubtensorModule::update_moving_price(netuid); + + let new_price = SubnetMovingPrice::::get(netuid); + assert!((new_price.to_num::() - 0.5).abs() < 0.001); + }); +} + // Test basic alpha issuance in coinbase mechanism. // This test verifies that: // - Alpha issuance is initialized to 0 for new subnets @@ -250,7 +305,7 @@ fn test_coinbase_alpha_issuance_base() { SubnetTAO::::insert(netuid2, initial); SubnetAlphaIn::::insert(netuid2, initial); // Check initial - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 500_000 // alpha_in = 500_000/price = 500_000 assert_eq!(SubnetAlphaIn::::get(netuid1), initial + emission / 2); @@ -285,7 +340,7 @@ fn test_coinbase_alpha_issuance_different() { SubnetMovingPrice::::insert(netuid1, I96F32::from_num(1)); SubnetMovingPrice::::insert(netuid2, I96F32::from_num(2)); // Run coinbase - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 333_333 // alpha_in = 333_333/price = 333_333 + initial assert_eq!(SubnetAlphaIn::::get(netuid1), initial + emission / 3); @@ -321,7 +376,7 @@ fn test_coinbase_alpha_issuance_with_cap_trigger() { SubnetMovingPrice::::insert(netuid1, 
I96F32::from_num(1)); SubnetMovingPrice::::insert(netuid2, I96F32::from_num(2)); // Run coinbase - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 333_333 // alpha_in = 333_333/price > 1_000_000_000 --> 1_000_000_000 + initial_alpha assert_eq!( @@ -365,7 +420,7 @@ fn test_coinbase_alpha_issuance_with_cap_trigger_and_block_emission() { SubnetMovingPrice::::insert(netuid1, I96F32::from_num(1)); SubnetMovingPrice::::insert(netuid2, I96F32::from_num(2)); // Run coinbase - SubtensorModule::run_coinbase(I96F32::from_num(emission)); + SubtensorModule::run_coinbase(U96F32::from_num(emission)); // tao_in = 333_333 // alpha_in = 333_333/price > 1_000_000_000 --> 0 + initial_alpha assert_eq!(SubnetAlphaIn::::get(netuid1), initial_alpha); @@ -386,10 +441,10 @@ fn test_owner_cut_base() { add_network(netuid, 1, 0); SubtensorModule::set_tempo(netuid, 10000); // Large number (dont drain) SubtensorModule::set_subnet_owner_cut(0); - SubtensorModule::run_coinbase(I96F32::from_num(0)); + SubtensorModule::run_coinbase(U96F32::from_num(0)); assert_eq!(PendingOwnerCut::::get(netuid), 0); // No cut SubtensorModule::set_subnet_owner_cut(u16::MAX); - SubtensorModule::run_coinbase(I96F32::from_num(0)); + SubtensorModule::run_coinbase(U96F32::from_num(0)); assert_eq!(PendingOwnerCut::::get(netuid), 1_000_000_000); // Full cut. }); } @@ -401,14 +456,14 @@ fn test_pending_swapped() { let netuid: u16 = 1; let emission: u64 = 1_000_000; add_network(netuid, 1, 0); - SubtensorModule::run_coinbase(I96F32::from_num(0)); + SubtensorModule::run_coinbase(U96F32::from_num(0)); assert_eq!(PendingAlphaSwapped::::get(netuid), 0); // Zero tao weight and no root. SubnetTAO::::insert(0, 1_000_000_000); // Add root weight. - SubtensorModule::run_coinbase(I96F32::from_num(0)); + SubtensorModule::run_coinbase(U96F32::from_num(0)); assert_eq!(PendingAlphaSwapped::::get(netuid), 0); // Zero tao weight with 1 root. 
SubtensorModule::set_tempo(netuid, 10000); // Large number (dont drain) SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 - SubtensorModule::run_coinbase(I96F32::from_num(0)); + SubtensorModule::run_coinbase(U96F32::from_num(0)); assert_eq!(PendingAlphaSwapped::::get(netuid), 125000000); // 1 TAO / ( 1 + 3 ) = 0.25 * 1 / 2 = 125000000 assert_eq!( PendingEmission::::get(netuid), @@ -514,7 +569,7 @@ fn test_drain_base_with_subnet_with_single_staker_registered_root_weight() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); let root_after = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, root); - close(stake_before + pending_alpha / 2, stake_after, 10); // Registered gets all alpha emission. + close(stake_before + pending_alpha, stake_after, 10); // Registered gets all alpha emission. close(stake_before + pending_tao, root_after, 10); // Registered gets all tao emission }); } @@ -605,8 +660,8 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - close(stake_before + pending_alpha / 4, stake_after1, 10); // Registered gets 1/2 emission - close(stake_before + pending_alpha / 4, stake_after2, 10); // Registered gets 1/2 emission. + close(stake_before + pending_alpha / 2, stake_after1, 10); // Registered gets 1/2 emission + close(stake_before + pending_alpha / 2, stake_after2, 10); // Registered gets 1/2 emission. 
close(stake_before + pending_tao / 2, root_after1, 10); // Registered gets 1/2 tao emission close(stake_before + pending_tao / 2, root_after2, 10); // Registered gets 1/2 tao emission }); @@ -664,21 +719,17 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); let expected_stake = I96F32::from_num(stake_before) - + (I96F32::from_num(pending_alpha) - * I96F32::from_num(3.0 / 5.0) - * I96F32::from_num(1.0 / 3.0)); - close(expected_stake.to_num::(), stake_after1, 10); // Registered gets 60% of emission + + (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0)); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); // Registered gets 50% of alpha emission let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(2.0 / 5.0) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake2.to_num::(), stake_after2, 10); // Registered gets 40% emission + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // Registered gets 50% emission let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); // Registered gets 2/3 tao emission + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // Registered gets 2/3 tao emission let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); // Registered gets 1/3 tao emission + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); // Registered gets 1/3 tao emission }); } @@ -734,26 +785,20 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am 
SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - // hotkey 1 has (1 + (2 * 0.5))/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.5714285714 of the hotkey emission. let expected_stake = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.5714285714) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake.to_num::(), stake_after1, 10); - // hotkey 2 has (1 + 1*0.5)/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.4285714286 of the hotkey emission. + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.4285714286) - * I96F32::from_num(2.0 / 3.0); - close(expected_stake2.to_num::(), stake_after2, 10); + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // hotkey 1 has 2 / 3 root tao let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // hotkey 1 has 1 / 3 root tao let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); }); } @@ -1003,11 +1048,11 @@ fn test_get_root_children_drain() { // Alice and Bob both made half of the dividends. 
assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&alice, alpha), - alice_alpha_stake + pending_alpha / 4 + alice_alpha_stake + pending_alpha / 2 ); assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&bob, alpha), - bob_alpha_stake + pending_alpha / 4 + bob_alpha_stake + pending_alpha / 2 ); // Lets drain @@ -1037,9 +1082,10 @@ fn test_get_root_children_drain() { assert_eq!(AlphaDividendsPerSubnet::::get(alpha, alice), 0); assert_eq!(TaoDividendsPerSubnet::::get(alpha, alice), 0); // Bob makes it all. - assert_eq!( + assert_abs_diff_eq!( AlphaDividendsPerSubnet::::get(alpha, bob), - (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 - 0.495412844)).to_num::() + pending_alpha, + epsilon = 1 ); assert_eq!(TaoDividendsPerSubnet::::get(alpha, bob), pending_root); }); @@ -1117,12 +1163,12 @@ fn test_get_root_children_drain_half_proportion() { // Alice and Bob make the same amount. close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 4, + pending_alpha / 2, 10, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 4, + pending_alpha / 2, 10, ); }); @@ -1188,7 +1234,7 @@ fn test_get_root_children_drain_with_take() { // Set Bob as 100% child of Alice on root. ChildkeyTake::::insert(bob, alpha, u16::MAX); mock_set_children_no_epochs(alpha, &alice, &[(u64::MAX, bob)]); - // Set Bob childkey take to zero. + // Set Bob validator take to zero. Delegates::::insert(alice, 0); Delegates::::insert(bob, 0); @@ -1196,11 +1242,11 @@ fn test_get_root_children_drain_with_take() { let pending_alpha: u64 = 1_000_000_000; SubtensorModule::drain_pending_emission(alpha, pending_alpha, 0, 0, 0); - // Alice and Bob make the same amount. + // Bob makes it all. close(AlphaDividendsPerSubnet::::get(alpha, alice), 0, 10); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 2, + pending_alpha, 10, ); }); @@ -1277,12 +1323,12 @@ fn test_get_root_children_drain_with_half_take() { // Alice and Bob make the same amount. 
close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 8, + pending_alpha / 4, 10000, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - 3 * (pending_alpha / 8), + 3 * (pending_alpha / 4), 10000, ); }); @@ -1385,3 +1431,703 @@ fn test_get_root_children_drain_with_half_take() { // ); // }); // } + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_incentive_to_subnet_owner_is_burned --exact --show-output --nocapture +#[test] +fn test_incentive_to_subnet_owner_is_burned() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + let other_ck = U256::from(2); + let other_hk = U256::from(3); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + + let pending_tao: u64 = 1_000_000_000; + let pending_alpha: u64 = 0; // None to valis + let owner_cut: u64 = 0; + let mut incentives: BTreeMap = BTreeMap::new(); + + // Give incentive to other_hk + incentives.insert(other_hk, 10_000_000); + + // Give incentives to subnet_owner_hk + incentives.insert(subnet_owner_hk, 10_000_000); + + // Verify stake before + let subnet_owner_stake_before = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_before, 0); + let other_stake_before = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + assert_eq!(other_stake_before, 0); + + // Distribute dividends and incentives + SubtensorModule::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + BTreeMap::new(), + BTreeMap::new(), + ); + + // Verify stake after + let subnet_owner_stake_after = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_after, 0); + let other_stake_after = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + assert!(other_stake_after > 0); + }); +} + +#[test] +fn 
test_calculate_dividend_distribution_totals() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738_u64.into()); + dividends.insert(hotkeys[1], 19_283_940_u64.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_only_tao() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 0; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. 
+ stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738_u64.into()); + dividends.insert(hotkeys[1], 19_283_940_u64.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_no_tao_weight() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; // If tao weight is 0, then only alpha dividends should be input. + let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738_u64.into()); + dividends.insert(hotkeys[1], 19_283_940_u64.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_only_alpha() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; + let tao_weight: U96F32 = U96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738_u64.into()); + dividends.insert(hotkeys[1], 19_283_940_u64.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_and_incentive_distribution() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha / 2; // Pay half to validators. + let pending_tao: u64 = 0; + let pending_swapped = 0; // Only alpha output. + let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, pending_alpha / 2, pending_alpha / 2)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = alpha_dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_abs_diff_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha, + epsilon = 2 + ); + }); +} + +#[test] +fn test_calculate_dividend_and_incentive_distribution_all_to_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha; // Pay all to validators. 
+ let pending_tao: u64 = 0; + let tao_weight: U96F32 = U96F32::saturating_from_num(0.0); // 0% + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, 0, pending_alpha)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = alpha_dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha + ); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 683_051_923; + let total_emission: u64 = divdends.saturating_add(incentive); + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + total_emission + ); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a 
single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 0; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(dividends_total, divdends); + assert_eq!(incentives_total, 0); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_miners() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 0; + let incentive: u64 = 123_456_789; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(incentives_total, incentive); + assert_eq!(dividends_total, divdends); + }); +} + +#[test] +fn test_drain_pending_emission_no_miners_all_drained() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let init_stake: u64 = 1; + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + // Set the emission to be 1 million. + let emission: u64 = 1_000_000; + // Run drain pending without any miners. + SubtensorModule::drain_pending_emission(netuid, emission, 0, 0, 0); + + // Get the new stake of the hotkey. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect this neuron to get *all* the emission. + // Slight epsilon due to rounding (hotkey_take). 
+ assert_abs_diff_eq!(new_stake, emission.saturating_add(init_stake), epsilon = 1); + }); +} + +#[test] +fn test_drain_pending_emission_zero_emission() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 50); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + run_to_block_no_epoch(netuid, 50); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Set the emission to be ZERO. + SubtensorModule::drain_pending_emission(netuid, 0, 0, 0, 0); + + // Get the new stake of the hotkey. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set by epoch. 
+ assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. 
+ next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set. + assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started_start_after() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. 
+ SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + let block_number = DurationOfStartCall::get(); + run_to_block_no_epoch(netuid, block_number); + + let current_block = System::block_number(); + + // Run start call. + assert_ok!(SubtensorModule::start_call( + RuntimeOrigin::signed(sn_owner_ck), + netuid + )); + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(current_block + 1) + ); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(U96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. 
+ let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert!(new_stake > init_stake); + log::info!("new_stake: {}", new_stake); + }); +} diff --git a/pallets/subtensor/src/tests/delegate_info.rs b/pallets/subtensor/src/tests/delegate_info.rs index 6fbcbbfb87..fa9aa942f7 100644 --- a/pallets/subtensor/src/tests/delegate_info.rs +++ b/pallets/subtensor/src/tests/delegate_info.rs @@ -22,9 +22,8 @@ fn test_return_per_1000_tao() { // We expect 82 TAO per day with 10% of total_stake let expected_return_per_1000 = U64F64::from_num(82.0); - let diff_from_expected: f64 = (return_per_1000 / U64F64::from_num(1e9)) - .saturating_sub(expected_return_per_1000) - .to_num::(); + let diff_from_expected: f64 = + ((return_per_1000 / U64F64::from_num(1e9)) - expected_return_per_1000).to_num::(); let eps: f64 = 0.0005e9; // Precision within 0.0005 TAO assert!( diff --git a/pallets/subtensor/src/tests/emission.rs b/pallets/subtensor/src/tests/emission.rs index 61a08ccd32..c4d8d51e3c 100644 --- a/pallets/subtensor/src/tests/emission.rs +++ b/pallets/subtensor/src/tests/emission.rs @@ -102,7 +102,7 @@ fn test_consecutive_blocks() { let mut last_result = SubtensorModule::blocks_until_next_epoch(netuid, tempo, 0); for i in 1..tempo - 1 { let current_result = SubtensorModule::blocks_until_next_epoch(netuid, tempo, i as u64); - assert_eq!(current_result, last_result.saturating_sub(1)); + assert_eq!(current_result, last_result - 1); last_result = current_result; } }); diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 058d0dfd07..aaaf93e086 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -8,6 +8,7 @@ use super::mock::*; use crate::epoch::math::safe_exp; use crate::*; +use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_ok}; // use frame_system::Config; @@ -989,7 +990,7 @@ fn test_bonds() { let sparse: bool = true; let 
n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1018,7 +1019,7 @@ fn test_bonds() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!( SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch( netuid, 1_000_000_000 ); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val->srv1: 0.1, val->srv2: 0.2, val->srv3: 0.3, val->srv4: 0.4] for uid in 0..(n/2) as u64 { @@ -1068,7 +1069,8 @@ fn test_bonds() { // === Set self-weight only on val1 let uid = 0; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* n: 8 @@ -1115,7 +1117,8 @@ fn test_bonds() { // === Set self-weight only on val2 let uid = 1; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 3 @@ -1151,7 +1154,8 @@ fn test_bonds() { // === Set self-weight only on val3 let uid = 2; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } 
else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 4 @@ -1186,7 +1190,8 @@ fn test_bonds() { // === Set val3->srv4: 1 assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(2)), netuid, vec![7], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 5 @@ -1219,7 +1224,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 6 @@ -1240,7 +1246,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 7 @@ -1261,7 +1268,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 8 @@ -1286,7 +1294,7 @@ fn test_bonds_with_liquid_alpha() { let sparse: bool = true; let n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1326,7 +1334,7 @@ fn test_bonds_with_liquid_alpha() { // Initilize with first epoch SubtensorModule::epoch(netuid, 1_000_000_000); - next_block(); + next_block_no_epoch(netuid); // Set weights for uid in 0..(n / 2) { @@ -1417,7 +1425,7 @@ fn 
test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1439,7 +1447,7 @@ fn test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1543,7 +1551,7 @@ fn test_active_stake() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1586,7 +1594,7 @@ fn test_active_stake() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val1->srv1: 0.5, val1->srv2: 0.5, val2->srv1: 0.5, val2->srv2: 0.5] for uid in 0..(n / 2) as u64 { @@ -1627,7 +1635,7 @@ fn test_active_stake() { } } let activity_cutoff: u64 = SubtensorModule::get_activity_cutoff(netuid) as u64; - run_to_block(activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated // === Update uid 0 weights assert_ok!(SubtensorModule::set_weights( @@ -1697,7 +1705,7 @@ fn test_active_stake() { vec![u16::MAX / (n / 2); (n / 2) as usize], 0 )); - run_to_block(activity_cutoff + 3); // run to block where validator (uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 3); // 
run to block where validator (uid 0, 1) weights become outdated if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1750,7 +1758,7 @@ fn test_outdated_weights() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 0; let mut block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1796,7 +1804,7 @@ fn test_outdated_weights() { assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); - block_number = next_block(); // run to next block to ensure weights are set on nodes after their registration block + block_number = next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3, srv1->srv1: 1, srv2->srv2: 1] @@ -1877,7 +1885,7 @@ fn test_outdated_weights() { SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) .expect("Not registered") ); - next_block(); // run to next block to outdate weights and bonds set on deregistered uid + next_block_no_epoch(netuid); // run to next block to outdate weights and bonds set on deregistered uid // === Update weights from only uid=0 assert_ok!(SubtensorModule::set_weights( @@ -2124,6 +2132,186 @@ fn test_zero_weights() { }); } +// Test that recently/deregistered miner bonds are cleared before EMA. 
+#[test] +fn test_deregistered_miner_bonds() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 4; + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + + let stake: u64 = 1; + add_network(netuid, high_tempo, 0); + SubtensorModule::set_max_allowed_uids(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_min_allowed_weights(netuid, 0); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::set_bonds_penalty(netuid, u16::MAX); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Register [validator1, validator2, server1, server2] + let block_number = System::block_number(); + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), + ); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stake, + ); + } + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, n); + assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); + SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + next_block(); // run to next block to ensure weights 
are set on nodes after their registration block + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3] + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run 2 blocks + next_block(); + next_block(); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2 = bonds[0][2]; + let bond_0_3 = bonds[0][3]; + + // Non-zero bonds + assert!(bond_0_2 > 0); + assert!(bond_0_3 > 0); + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run one more block + next_block(); + + // === Dereg server2 at uid3 (least emission) + register new key over uid3 + let new_key: u64 = n as u64; // register a new key while at max capacity, which means the least incentive uid will be deregistered + let block_number = System::block_number(); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 0, + &U256::from(new_key), + ); + assert_eq!(SubtensorModule::get_max_registrations_per_block(netuid), n); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(new_key)), + netuid, + block_number, + nonce, + work, + U256::from(new_key), + U256::from(new_key) + )); + let deregistered_uid: u16 = n - 1; // since uid=n-1 only recieved 1/3 of weight, it will get 
pruned first + assert_eq!( + U256::from(new_key), + SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) + .expect("Not registered") + ); + + // Set weights again so they're active. + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Run 1 block + next_block(); + // Assert block at registration happened after the last tempo + let block_at_registration = SubtensorModule::get_neuron_block_at_registration(netuid, 3); + let block_number = System::block_number(); + assert!( + block_at_registration >= block_number - 2, + "block at registration: {}, block number: {}", + block_at_registration, + block_number + ); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch again. + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2_new = bonds[0][2]; + let bond_0_3_new = bonds[0][3]; + + // We expect the old bonds for server2, (uid3), to be reset. + // For server1, (uid2), the bond should be higher than before. + assert!( + bond_0_2_new >= bond_0_2, + "bond_0_2_new: {}, bond_0_2: {}", + bond_0_2_new, + bond_0_2 + ); + assert!( + bond_0_3_new <= bond_0_3, + "bond_0_3_new: {}, bond_0_3: {}", + bond_0_3_new, + bond_0_3 + ); + }); +} + // Test that epoch assigns validator permits to highest stake uids, varies uid interleaving and stake values. 
#[test] fn test_validator_permits() { @@ -2773,6 +2961,96 @@ fn test_blocks_since_last_step() { }); } +#[test] +fn test_can_set_self_weight_as_subnet_owner() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey: U256 = U256::from(1); + let subnet_owner_hotkey: U256 = U256::from(1 + 456); + + let other_hotkey: U256 = U256::from(2); + + let stake = 5_000_000_000_000; // 5k TAO + let to_emit: u64 = 1_000_000_000; // 1 TAO + + // Create subnet + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + // Register the other hotkey + register_ok_neuron(netuid, other_hotkey, subnet_owner_coldkey, 0); + + // Add stake to owner hotkey. + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &subnet_owner_hotkey, + &subnet_owner_coldkey, + netuid, + stake, + ); + + // Give vpermits to owner hotkey ONLY + ValidatorPermit::::insert(netuid, vec![true, false]); + + // Set weight of 50% to each hotkey. + // This includes a self-weight + let fifty_percent: u16 = u16::MAX / 2; + Weights::::insert(netuid, 0, vec![(0, fifty_percent), (1, fifty_percent)]); + + step_block(1); + // Set updated so weights are valid + LastUpdate::::insert(netuid, vec![2, 0]); + + // Run epoch + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, to_emit); + + // hotkey_emission is [(hotkey, incentive, dividend)] + assert_eq!(hotkey_emission.len(), 2); + assert_eq!(hotkey_emission[0].0, subnet_owner_hotkey); + assert_eq!(hotkey_emission[1].0, other_hotkey); + + log::debug!("hotkey_emission: {:?}", hotkey_emission); + // Both should have received incentive emission + assert!(hotkey_emission[0].1 > 0); + assert!(hotkey_emission[1].1 > 0); + + // Their incentive should be equal + assert_eq!(hotkey_emission[0].1, hotkey_emission[1].1); + }); +} + +#[test] +fn test_epoch_outputs_single_staker_registered_no_weights() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // Don't run 
automatically. + add_network(netuid, high_tempo, 0); + + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha: u64 = 1_000_000_000; + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, pending_alpha); + + let sum_incentives: u64 = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum(); + let sum_dividends: u64 = hotkey_emission + .iter() + .map(|(_, _, dividend)| dividend) + .sum(); + + assert_abs_diff_eq!( + sum_incentives.saturating_add(sum_dividends), + pending_alpha, + epsilon = 1_000 + ); + }); +} + // Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, // of which the first 64 are validators, the graph is split into a major and minor set, each setting // specific weight on itself and the complement on the other. 
diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs new file mode 100644 index 0000000000..bdd55c1961 --- /dev/null +++ b/pallets/subtensor/src/tests/evm.rs @@ -0,0 +1,246 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::unwrap_used, + clippy::indexing_slicing +)] + +use super::mock::*; +use crate::*; +use frame_support::testing_prelude::*; +use sp_core::{H160, Pair, U256, blake2_256, ecdsa, keccak_256}; + +fn public_to_evm_key(pubkey: &ecdsa::Public) -> H160 { + use libsecp256k1::PublicKey; + use sp_core::keccak_256; + + let secp_pub = PublicKey::parse_compressed(&pubkey.0).expect("Invalid pubkey"); + let uncompressed = secp_pub.serialize(); // 65 bytes: 0x04 + X + Y + let hash = keccak_256(&uncompressed[1..]); // drop 0x04 + let mut address = [0u8; 20]; + address.copy_from_slice(&hash[12..]); + H160::from(address) +} + +#[test] +fn test_associate_evm_key_success() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let mut message = [0u8; 64]; + message[..32].copy_from_slice(hotkey_bytes.as_ref()); + message[32..].copy_from_slice(hashed_block_number.as_ref()); + let hashed_message = keccak_256(message.as_ref()); + let signature = pair.sign_prehashed(&hashed_message); + + assert_ok!(SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + )); + + System::assert_last_event( + 
Event::EvmKeyAssociated { + netuid, + hotkey, + evm_key, + block_associated: block_number, + } + .into(), + ); + }); +} + +#[test] +fn test_associate_evm_key_different_block_number_success() { + new_test_ext(100).execute_with(|| { + let netuid: u16 = 1; + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = 99u64; + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let mut message = [0u8; 64]; + message[..32].copy_from_slice(hotkey_bytes.as_ref()); + message[32..].copy_from_slice(hashed_block_number.as_ref()); + let hashed_message = keccak_256(message.as_ref()); + let signature = pair.sign_prehashed(&hashed_message); + + assert_ok!(SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + )); + + System::assert_last_event( + Event::EvmKeyAssociated { + netuid, + hotkey, + evm_key, + block_associated: frame_system::Pallet::::block_number(), + } + .into(), + ); + }); +} + +#[test] +fn test_associate_evm_key_coldkey_does_not_own_hotkey() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let mut message = [0u8; 64]; + 
message[..32].copy_from_slice(hotkey_bytes.as_ref()); + message[32..].copy_from_slice(hashed_block_number.as_ref()); + let hashed_message = keccak_256(message.as_ref()); + let signature = pair.sign_prehashed(&hashed_message); + + assert_err!( + SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + ), + Error::::NonAssociatedColdKey + ); + }); +} + +#[test] +fn test_associate_evm_key_hotkey_not_registered_in_subnet() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let mut message = [0u8; 64]; + message[..32].copy_from_slice(hotkey_bytes.as_ref()); + message[32..].copy_from_slice(hashed_block_number.as_ref()); + let hashed_message = keccak_256(message.as_ref()); + let signature = pair.sign_prehashed(&hashed_message); + + assert_err!( + SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + ), + Error::::HotKeyNotRegisteredInSubNet + ); + }); +} + +#[test] +fn test_associate_evm_key_using_wrong_hash_function() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let pair = ecdsa::Pair::generate().0; + let public = 
pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let mut message = [0u8; 64]; + message[..32].copy_from_slice(hotkey_bytes.as_ref()); + message[32..].copy_from_slice(hashed_block_number.as_ref()); + let hashed_message = blake2_256(message.as_ref()); + let signature = pair.sign_prehashed(&hashed_message); + + assert_err!( + SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + ), + Error::::InvalidRecoveredPublicKey + ); + }); +} diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index 19bab75b4e..c70da2c9d2 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -58,7 +58,13 @@ fn assert_sparse_mat_compare( ) { assert!(ma.len() == mb.len()); for row in 0..ma.len() { - assert!(ma[row].len() == mb[row].len()); + assert!( + ma[row].len() == mb[row].len(), + "row: {}, ma: {:?}, mb: {:?}", + row, + ma[row], + mb[row] + ); for j in 0..ma[row].len() { assert!(ma[row][j].0 == mb[row][j].0); // u16 assert_float_compare(ma[row][j].1, mb[row][j].1, epsilon) // I32F32 @@ -1034,6 +1040,27 @@ fn test_math_inplace_mask_diag() { ); } +#[test] +fn test_math_inplace_mask_diag_except_index() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let rows = 3; + + for i in 0..rows { + let mut target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; + let row = i * rows; + let col = i; + target[row + col] = vector[row + col]; + + let mut mat = vec_to_mat_fixed(&vector, rows, false); + inplace_mask_diag_except_index(&mut mat, i as u16); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, rows, false), + I32F32::from_num(0), + ); + } +} + #[test] fn test_math_mask_rows_sparse() { let input: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; @@ 
-1105,6 +1132,58 @@ fn test_math_mask_diag_sparse() { ); } +#[test] +fn test_math_mask_diag_sparse_except_index() { + let rows = 3; + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, rows, false); + + for i in 0..rows { + let mut target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; + let row = i * rows; + let col = i; + target[row + col] = vector[row + col]; + + let result = mask_diag_sparse_except_index(&mat, i as u16); + let target_as_mat = vec_to_sparse_mat_fixed(&target, rows, false); + + assert_sparse_mat_compare(&result, &target_as_mat, I32F32::from_num(0)); + } + + let vector: Vec = vec![1., 0., 0., 0., 5., 0., 0., 0., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, rows, false); + + for i in 0..rows { + let mut target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let row = i * rows; + let col = i; + target[row + col] = vector[row + col]; + + let result = mask_diag_sparse_except_index(&mat, i as u16); + let target_as_mat = vec_to_sparse_mat_fixed(&target, rows, false); + assert_eq!(result.len(), target_as_mat.len()); + + assert_sparse_mat_compare(&result, &target_as_mat, I32F32::from_num(0)); + } + + for i in 0..rows { + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, rows, false); + + let mut target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let row = i * rows; + let col = i; + target[row + col] = vector[row + col]; + + let result = mask_diag_sparse_except_index(&mat, i as u16); + let target_as_mat = vec_to_sparse_mat_fixed(&target, rows, false); + assert_eq!(result.len(), target_as_mat.len()); + + assert_sparse_mat_compare(&result, &target_as_mat, I32F32::from_num(0)); + } +} + #[test] fn test_math_vec_mask_sparse_matrix() { let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; @@ -1141,6 +1220,45 @@ fn test_math_vec_mask_sparse_matrix() { ); } +#[test] +fn test_math_scalar_vec_mask_sparse_matrix() { + let vector: Vec = 
vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 1; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a == b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a <= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a >= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + #[test] fn test_math_row_hadamard() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 2b2feae4b8..5efc4f152a 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -2,6 +2,8 @@ use super::mock::*; use crate::*; +use alloc::collections::BTreeMap; +use approx::assert_abs_diff_eq; use codec::{Decode, Encode}; use frame_support::{ StorageHasher, Twox64Concat, assert_ok, @@ -9,6 +11,7 @@ use frame_support::{ traits::{StorageInstance, StoredMap}, weights::Weight, }; + use frame_system::Config; 
use sp_core::{H256, U256, crypto::Ss58Codec}; use sp_io::hashing::twox_128; @@ -32,20 +35,21 @@ fn test_initialise_ti() { use frame_support::traits::OnRuntimeUpgrade; new_test_ext(1).execute_with(|| { - crate::SubnetLocked::::insert(1, 100); - crate::SubnetLocked::::insert(2, 5); pallet_balances::TotalIssuance::::put(1000); - crate::TotalStake::::put(25); + crate::SubnetTAO::::insert(1, 100); + crate::SubnetTAO::::insert(2, 5); // Ensure values are NOT initialized prior to running migration assert!(crate::TotalIssuance::::get() == 0); + assert!(crate::TotalStake::::get() == 0); crate::migrations::migrate_init_total_issuance::initialise_total_issuance::Migration::::on_runtime_upgrade(); // Ensure values were initialized correctly + assert!(crate::TotalStake::::get() == 105); assert!( crate::TotalIssuance::::get() - == 105u64.saturating_add(1000).saturating_add(25) + == 105u64.saturating_add(1000) ); }); } @@ -413,3 +417,141 @@ fn test_migrate_subnet_volume() { assert_eq!(new_value, Some(old_value as u128)); }); } + +#[test] +fn test_migrate_set_first_emission_block_number() { + new_test_ext(1).execute_with(|| { + let netuids: [u16; 3] = [1, 2, 3]; + let block_number = 100; + for netuid in netuids.iter() { + add_network(*netuid, 1, 0); + } + run_to_block(block_number); + let weight = crate::migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::(); + + let expected_weight: Weight = ::DbWeight::get().reads(3) + ::DbWeight::get().writes(netuids.len() as u64); + assert_eq!(weight, expected_weight); + + assert_eq!(FirstEmissionBlockNumber::::get(0), None); + for netuid in netuids.iter() { + assert_eq!(FirstEmissionBlockNumber::::get(netuid), Some(block_number)); + } +}); +} + +#[test] +fn test_migrate_remove_zero_total_hotkey_alpha() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_remove_zero_total_hotkey_alpha"; + let netuid = 1u16; + + let hotkey_zero = U256::from(100u64); + let hotkey_nonzero = 
U256::from(101u64); + + // Insert one zero-alpha entry and one non-zero entry + TotalHotkeyAlpha::::insert(hotkey_zero, netuid, 0u64); + TotalHotkeyAlpha::::insert(hotkey_nonzero, netuid, 123u64); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_zero, netuid), 0u64); + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet." + ); + + let weight = crate::migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::(); + + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should be marked as run." + ); + + assert!( + !TotalHotkeyAlpha::::contains_key(hotkey_zero, netuid), + "Zero-alpha entry should have been removed." + ); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !weight.is_zero(), + "Migration weight should be non-zero." + ); + }); +} + +#[test] +fn test_migrate_revealed_commitments() { + new_test_ext(1).execute_with(|| { + // -------------------------------- + // Step 1: Simulate Old Storage Entries + // -------------------------------- + const MIGRATION_NAME: &str = "migrate_revealed_commitments_v2"; + + // Pallet prefix == twox_128("Commitments") + let pallet_prefix = twox_128("Commitments".as_bytes()); + // Storage item prefix == twox_128("RevealedCommitments") + let storage_prefix = twox_128("RevealedCommitments".as_bytes()); + + // Example keys for the DoubleMap: + // Key1 (netuid) uses Identity (no hash) + // Key2 (account) uses Twox64Concat + let netuid: u16 = 123; + let account_id: u64 = 999; // Or however your test `AccountId` is represented + + // Construct the full storage key for `RevealedCommitments(netuid, account_id)` + let mut storage_key = Vec::new(); + storage_key.extend_from_slice(&pallet_prefix); + storage_key.extend_from_slice(&storage_prefix); + + // Identity for netuid => no hashing, just raw encode + 
storage_key.extend_from_slice(&netuid.encode()); + + // Twox64Concat for account + let account_hashed = Twox64Concat::hash(&account_id.encode()); + storage_key.extend_from_slice(&account_hashed); + + // Simulate an old value we might have stored: + // For example, the old type was `RevealedData` + // We'll just store a random encoded value for demonstration + let old_value = (vec![1, 2, 3, 4], 42u64); + put_raw(&storage_key, &old_value.encode()); + + // Confirm the storage value is set + let stored_value = get_raw(&storage_key).expect("Expected to get a value"); + let decoded_value = <(Vec, u64)>::decode(&mut &stored_value[..]) + .expect("Failed to decode the old revealed commitments"); + assert_eq!(decoded_value, old_value); + + // Also confirm that the migration has NOT run yet + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet" + ); + + // -------------------------------- + // Step 2: Run the Migration + // -------------------------------- + let weight = crate::migrations::migrate_upgrade_revealed_commitments::migrate_upgrade_revealed_commitments::(); + + // Migration should be marked as run + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should now be marked as run" + ); + + // -------------------------------- + // Step 3: Verify Migration Effects + // -------------------------------- + // The old key/value should be removed + let stored_value_after = get_raw(&storage_key); + assert!( + stored_value_after.is_none(), + "Old storage entry should be cleared" + ); + + // Weight returned should be > 0 (some cost was incurred clearing storage) + assert!(!weight.is_zero(), "Migration weight should be non-zero"); + }); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 79da754815..c4fb5a5f29 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -1,5 +1,6 @@ 
#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] use crate::utils::rate_limiting::TransactionType; +use frame_support::PalletId; use frame_support::derive_impl; use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::weights::Weight; @@ -18,6 +19,8 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, }; use sp_std::cmp::Ordering; +use substrate_fixed::types::U64F64; +use subtensor_swap_interface::LiquidityDataProvider; use crate::*; @@ -38,6 +41,7 @@ frame_support::construct_runtime!( Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 9, Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 10, Drand: pallet_drand::{Pallet, Call, Storage, Event} = 11, + Swap: pallet_subtensor_swap::{Pallet, Call, Storage, Event} = 12, } ); @@ -138,7 +142,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty:u16 = 0; + pub const InitialBondsPenalty:u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production @@ -152,7 +156,7 @@ parameter_types! { pub const InitialTxDelegateTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialTxChildKeyTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialBurn: u64 = 0; - pub const InitialMinBurn: u64 = 0; + pub const InitialMinBurn: u64 = 500_000; pub const InitialMaxBurn: u64 = 1_000_000_000; pub const InitialValidatorPruneLen: u64 = 0; pub const InitialScalingLawPower: u16 = 50; @@ -184,6 +188,8 @@ parameter_types! { pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days pub const InitialTaoWeight: u64 = 0; // 100% global weight. 
+ pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // Default as 7 days } // Configure collective pallet for council @@ -406,6 +412,46 @@ impl crate::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; + type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; + type SwapInterface = Swap; +} + +impl LiquidityDataProvider for SubtensorModule { + fn tao_reserve(netuid: u16) -> u64 { + SubnetTAO::::get(netuid) + } + + fn alpha_reserve(netuid: u16) -> u64 { + SubnetAlphaIn::::get(netuid) + } + + fn tao_balance(account_id: &AccountId) -> u64 { + Balances::free_balance(account_id) + } + + fn alpha_balance(netuid: u16, account_id: &AccountId) -> u64 { + TotalHotkeyAlpha::::get(account_id, netuid) + } +} + +// Swap-related parameter types +parameter_types! 
{ + pub const SwapProtocolId: PalletId = PalletId(*b"ten/swap"); + pub const SwapMaxFeeRate: u16 = 10000; // 15.26% + pub const SwapMaxPositions: u32 = 100; + pub const SwapMinimumLiquidity: u64 = 1_000; +} + +impl pallet_subtensor_swap::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AdminOrigin = EnsureRoot; + type LiquidityDataProvider = SubtensorModule; + type ProtocolId = SwapProtocolId; + type MaxFeeRate = SwapMaxFeeRate; + type MaxPositions = SwapMaxPositions; + type MinimumLiquidity = SwapMinimumLiquidity; + type WeightInfo = (); } pub struct OriginPrivilegeCmp; @@ -590,6 +636,30 @@ pub(crate) fn run_to_block(n: u64) { } } +#[allow(dead_code)] +pub(crate) fn next_block_no_epoch(netuid: u16) -> u64 { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + let new_block = next_block(); + SubtensorModule::set_tempo(netuid, old_tempo); + + new_block +} + +#[allow(dead_code)] +pub(crate) fn run_to_block_no_epoch(netuid: u16, n: u64) { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + run_to_block(n); + SubtensorModule::set_tempo(netuid, old_tempo); +} + #[allow(dead_code)] pub(crate) fn step_epochs(count: u16, netuid: u16) { for _ in 0..count { @@ -660,6 +730,14 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::init_new_network(netuid, tempo); SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); + FirstEmissionBlockNumber::::insert(netuid, 1); +} + +#[allow(dead_code)] +pub fn add_network_without_emission_block(netuid: u16, tempo: u16, _modality: u16) { + SubtensorModule::init_new_network(netuid, tempo); + 
SubtensorModule::set_network_registration_allowed(netuid, true); + SubtensorModule::set_network_pow_registration_allowed(netuid, true); } #[allow(dead_code)] @@ -668,6 +746,22 @@ pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { let lock_cost = SubtensorModule::get_network_lock_cost(); SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost); + assert_ok!(SubtensorModule::register_network( + RawOrigin::Signed(*coldkey).into(), + *hotkey + )); + NetworkRegistrationAllowed::::insert(netuid, true); + NetworkPowRegistrationAllowed::::insert(netuid, true); + FirstEmissionBlockNumber::::insert(netuid, 0); + netuid +} + +#[allow(dead_code)] +pub fn add_dynamic_network_without_emission_block(hotkey: &U256, coldkey: &U256) -> u16 { + let netuid = SubtensorModule::get_next_netuid(); + let lock_cost = SubtensorModule::get_network_lock_cost(); + SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost); + assert_ok!(SubtensorModule::register_network( RawOrigin::Signed(*coldkey).into(), *hotkey diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 6865c9fa49..ce891e5615 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -5,17 +5,20 @@ mod delegate_info; mod difficulty; mod emission; mod epoch; +mod evm; mod math; mod migration; mod mock; mod move_stake; mod networks; mod neuron_info; +mod recycle_alpha; mod registration; mod senate; mod serving; mod staking; mod staking2; +mod subnet; mod swap_coldkey; mod swap_hotkey; mod uids; diff --git a/pallets/subtensor/src/tests/move_stake.rs b/pallets/subtensor/src/tests/move_stake.rs index afaee9b980..0b7584a4f0 100644 --- a/pallets/subtensor/src/tests/move_stake.rs +++ b/pallets/subtensor/src/tests/move_stake.rs @@ -3,7 +3,7 @@ use crate::*; use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; use sp_core::{Get, U256}; -use substrate_fixed::types::{I96F32, U64F64}; +use 
substrate_fixed::types::{U64F64, U96F32}; // 1. test_do_move_success // Description: Test a successful move of stake between two hotkeys in the same subnet @@ -112,9 +112,9 @@ fn test_do_move_different_subnets() { ), 0 ); - let alpha_fee: I96F32 = - I96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); - let expected_value = I96F32::from_num(alpha) + let alpha_fee: U96F32 = + U96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); + let expected_value = U96F32::from_num(alpha) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid); assert_abs_diff_eq!( @@ -709,8 +709,8 @@ fn test_do_move_storage_updates() { 0 ); let alpha_fee = - I96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); - let alpha2 = I96F32::from_num(alpha) * SubtensorModule::get_alpha_price(origin_netuid) + U96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); + let alpha2 = U96F32::from_num(alpha) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid); assert_abs_diff_eq!( SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( @@ -1080,7 +1080,7 @@ fn test_do_transfer_different_subnets() { &destination_coldkey, destination_netuid, ); - let expected_value = I96F32::from_num(stake_amount - fee) + let expected_value = U96F32::from_num(stake_amount - fee) / SubtensorModule::get_alpha_price(destination_netuid); assert_abs_diff_eq!( dest_stake, @@ -1134,8 +1134,8 @@ fn test_do_swap_success() { destination_netuid, ); let alpha_fee = - I96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); - let expected_value = I96F32::from_num(alpha_before) + U96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); + let expected_value = U96F32::from_num(alpha_before) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid); 
assert_abs_diff_eq!( @@ -1353,8 +1353,8 @@ fn test_do_swap_partial_stake() { ); let alpha_fee = - I96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); - let expected_value = I96F32::from_num(swap_amount) + U96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); + let expected_value = U96F32::from_num(swap_amount) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid); assert_abs_diff_eq!( @@ -1408,8 +1408,8 @@ fn test_do_swap_storage_updates() { ); let alpha_fee = - I96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); - let expected_value = I96F32::from_num(alpha) + U96F32::from_num(fee) / SubtensorModule::get_alpha_price(destination_netuid); + let expected_value = U96F32::from_num(alpha) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid); assert_abs_diff_eq!( @@ -1543,7 +1543,7 @@ fn test_swap_stake_limit_validate() { // Setup limit price so that it doesn't allow much slippage at all let limit_price = ((SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(destination_netuid)) - * I96F32::from_num(1_000_000_000)) + * U96F32::from_num(1_000_000_000)) .to_num::() - 1_u64; @@ -1737,8 +1737,8 @@ fn test_move_stake_specific_stake_into_subnet_fail() { 0 ); let fee = DefaultStakingFee::::get(); - let alpha_fee: I96F32 = I96F32::from_num(fee) / SubtensorModule::get_alpha_price(netuid); - let expected_value = I96F32::from_num(alpha_to_move) + let alpha_fee: U96F32 = U96F32::from_num(fee) / SubtensorModule::get_alpha_price(netuid); + let expected_value = U96F32::from_num(alpha_to_move) * SubtensorModule::get_alpha_price(origin_netuid) / SubtensorModule::get_alpha_price(netuid); assert_abs_diff_eq!( diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index f367a01d02..7dda0502c1 100644 --- a/pallets/subtensor/src/tests/networks.rs 
+++ b/pallets/subtensor/src/tests/networks.rs @@ -1,6 +1,5 @@ use super::mock::*; use crate::*; -use crate::{ColdkeySwapScheduleDuration, DissolveNetworkScheduleDuration, Event}; use frame_support::assert_ok; use frame_system::Config; use sp_core::U256; @@ -42,245 +41,245 @@ fn test_registration_ok() { }) } -#[test] -fn test_schedule_dissolve_network_execution() { - new_test_ext(1).execute_with(|| { - let block_number: u64 = 0; - let netuid: u16 = 2; - let tempo: u16 = 13; - let hotkey_account_id: U256 = U256::from(1); - let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - 129123813, - &hotkey_account_id, - ); - - //add network - add_network(netuid, tempo, 0); - - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id), - netuid, - block_number, - nonce, - work.clone(), - hotkey_account_id, - coldkey_account_id - )); - - assert!(SubtensorModule::if_subnet_exist(netuid)); - - assert_ok!(SubtensorModule::schedule_dissolve_network( - <::RuntimeOrigin>::signed(coldkey_account_id), - netuid - )); - - let current_block = System::block_number(); - let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); - - System::assert_last_event( - Event::DissolveNetworkScheduled { - account: coldkey_account_id, - netuid, - execution_block, - } - .into(), - ); - - run_to_block(execution_block); - assert!(!SubtensorModule::if_subnet_exist(netuid)); - }) -} - -#[test] -fn test_non_owner_schedule_dissolve_network_execution() { - new_test_ext(1).execute_with(|| { - let block_number: u64 = 0; - let netuid: u16 = 2; - let tempo: u16 = 13; - let hotkey_account_id: U256 = U256::from(1); - let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har - let non_network_owner_account_id = U256::from(2); // - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - 
block_number, - 129123813, - &hotkey_account_id, - ); - - //add network - add_network(netuid, tempo, 0); - - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id), - netuid, - block_number, - nonce, - work.clone(), - hotkey_account_id, - coldkey_account_id - )); - - assert!(SubtensorModule::if_subnet_exist(netuid)); - - assert_ok!(SubtensorModule::schedule_dissolve_network( - <::RuntimeOrigin>::signed(non_network_owner_account_id), - netuid - )); - - let current_block = System::block_number(); - let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); - - System::assert_last_event( - Event::DissolveNetworkScheduled { - account: non_network_owner_account_id, - netuid, - execution_block, - } - .into(), - ); - - run_to_block(execution_block); - // network exists since the caller is no the network owner - assert!(SubtensorModule::if_subnet_exist(netuid)); - }) -} - -#[test] -fn test_new_owner_schedule_dissolve_network_execution() { - new_test_ext(1).execute_with(|| { - let block_number: u64 = 0; - let netuid: u16 = 2; - let tempo: u16 = 13; - let hotkey_account_id: U256 = U256::from(1); - let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har - let new_network_owner_account_id = U256::from(2); // - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - 129123813, - &hotkey_account_id, - ); - - //add network - add_network(netuid, tempo, 0); - - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id), - netuid, - block_number, - nonce, - work.clone(), - hotkey_account_id, - coldkey_account_id - )); - - assert!(SubtensorModule::if_subnet_exist(netuid)); - - // the account is not network owner when schedule the call - assert_ok!(SubtensorModule::schedule_dissolve_network( - <::RuntimeOrigin>::signed(new_network_owner_account_id), - netuid - )); - - let current_block = System::block_number(); - let execution_block = 
current_block + DissolveNetworkScheduleDuration::::get(); - - System::assert_last_event( - Event::DissolveNetworkScheduled { - account: new_network_owner_account_id, - netuid, - execution_block, - } - .into(), - ); - run_to_block(current_block + 1); - // become network owner after call scheduled - crate::SubnetOwner::::insert(netuid, new_network_owner_account_id); - - run_to_block(execution_block); - // network exists since the caller is no the network owner - assert!(!SubtensorModule::if_subnet_exist(netuid)); - }) -} - -#[test] -fn test_schedule_dissolve_network_execution_with_coldkey_swap() { - new_test_ext(1).execute_with(|| { - let block_number: u64 = 0; - let netuid: u16 = 2; - let tempo: u16 = 13; - let hotkey_account_id: U256 = U256::from(1); - let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har - let new_network_owner_account_id = U256::from(2); // - - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 1000000000000000); - - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - 129123813, - &hotkey_account_id, - ); - - //add network - add_network(netuid, tempo, 0); - - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id), - netuid, - block_number, - nonce, - work.clone(), - hotkey_account_id, - coldkey_account_id - )); - - assert!(SubtensorModule::if_subnet_exist(netuid)); - - // the account is not network owner when schedule the call - assert_ok!(SubtensorModule::schedule_swap_coldkey( - <::RuntimeOrigin>::signed(coldkey_account_id), - new_network_owner_account_id - )); - - let current_block = System::block_number(); - let execution_block = current_block + ColdkeySwapScheduleDuration::::get(); - - run_to_block(execution_block - 1); - - // the account is not network owner when schedule the call - assert_ok!(SubtensorModule::schedule_dissolve_network( - <::RuntimeOrigin>::signed(new_network_owner_account_id), - netuid - )); - - 
System::assert_last_event( - Event::DissolveNetworkScheduled { - account: new_network_owner_account_id, - netuid, - execution_block: DissolveNetworkScheduleDuration::::get() + execution_block - - 1, - } - .into(), - ); - - run_to_block(execution_block); - assert_eq!( - crate::SubnetOwner::::get(netuid), - new_network_owner_account_id - ); - - let current_block = System::block_number(); - let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); - - run_to_block(execution_block); - // network exists since the caller is no the network owner - assert!(!SubtensorModule::if_subnet_exist(netuid)); - }) -} +// #[test] +// fn test_schedule_dissolve_network_execution() { +// new_test_ext(1).execute_with(|| { +// let block_number: u64 = 0; +// let netuid: u16 = 2; +// let tempo: u16 = 13; +// let hotkey_account_id: U256 = U256::from(1); +// let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har +// let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( +// netuid, +// block_number, +// 129123813, +// &hotkey_account_id, +// ); + +// //add network +// add_network(netuid, tempo, 0); + +// assert_ok!(SubtensorModule::register( +// <::RuntimeOrigin>::signed(hotkey_account_id), +// netuid, +// block_number, +// nonce, +// work.clone(), +// hotkey_account_id, +// coldkey_account_id +// )); + +// assert!(SubtensorModule::if_subnet_exist(netuid)); + +// assert_ok!(SubtensorModule::schedule_dissolve_network( +// <::RuntimeOrigin>::signed(coldkey_account_id), +// netuid +// )); + +// let current_block = System::block_number(); +// let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); + +// System::assert_last_event( +// Event::DissolveNetworkScheduled { +// account: coldkey_account_id, +// netuid, +// execution_block, +// } +// .into(), +// ); + +// run_to_block(execution_block); +// assert!(!SubtensorModule::if_subnet_exist(netuid)); +// }) +// } + +// #[test] +// fn 
test_non_owner_schedule_dissolve_network_execution() { +// new_test_ext(1).execute_with(|| { +// let block_number: u64 = 0; +// let netuid: u16 = 2; +// let tempo: u16 = 13; +// let hotkey_account_id: U256 = U256::from(1); +// let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har +// let non_network_owner_account_id = U256::from(2); // +// let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( +// netuid, +// block_number, +// 129123813, +// &hotkey_account_id, +// ); + +// //add network +// add_network(netuid, tempo, 0); + +// assert_ok!(SubtensorModule::register( +// <::RuntimeOrigin>::signed(hotkey_account_id), +// netuid, +// block_number, +// nonce, +// work.clone(), +// hotkey_account_id, +// coldkey_account_id +// )); + +// assert!(SubtensorModule::if_subnet_exist(netuid)); + +// assert_ok!(SubtensorModule::schedule_dissolve_network( +// <::RuntimeOrigin>::signed(non_network_owner_account_id), +// netuid +// )); + +// let current_block = System::block_number(); +// let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); + +// System::assert_last_event( +// Event::DissolveNetworkScheduled { +// account: non_network_owner_account_id, +// netuid, +// execution_block, +// } +// .into(), +// ); + +// run_to_block(execution_block); +// // network exists since the caller is no the network owner +// assert!(SubtensorModule::if_subnet_exist(netuid)); +// }) +// } + +// #[test] +// fn test_new_owner_schedule_dissolve_network_execution() { +// new_test_ext(1).execute_with(|| { +// let block_number: u64 = 0; +// let netuid: u16 = 2; +// let tempo: u16 = 13; +// let hotkey_account_id: U256 = U256::from(1); +// let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har +// let new_network_owner_account_id = U256::from(2); // +// let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( +// netuid, +// block_number, +// 129123813, +// &hotkey_account_id, +// ); + +// 
//add network +// add_network(netuid, tempo, 0); + +// assert_ok!(SubtensorModule::register( +// <::RuntimeOrigin>::signed(hotkey_account_id), +// netuid, +// block_number, +// nonce, +// work.clone(), +// hotkey_account_id, +// coldkey_account_id +// )); + +// assert!(SubtensorModule::if_subnet_exist(netuid)); + +// // the account is not network owner when schedule the call +// assert_ok!(SubtensorModule::schedule_dissolve_network( +// <::RuntimeOrigin>::signed(new_network_owner_account_id), +// netuid +// )); + +// let current_block = System::block_number(); +// let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); + +// System::assert_last_event( +// Event::DissolveNetworkScheduled { +// account: new_network_owner_account_id, +// netuid, +// execution_block, +// } +// .into(), +// ); +// run_to_block(current_block + 1); +// // become network owner after call scheduled +// crate::SubnetOwner::::insert(netuid, new_network_owner_account_id); + +// run_to_block(execution_block); +// // network exists since the caller is no the network owner +// assert!(!SubtensorModule::if_subnet_exist(netuid)); +// }) +// } + +// #[test] +// fn test_schedule_dissolve_network_execution_with_coldkey_swap() { +// new_test_ext(1).execute_with(|| { +// let block_number: u64 = 0; +// let netuid: u16 = 2; +// let tempo: u16 = 13; +// let hotkey_account_id: U256 = U256::from(1); +// let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har +// let new_network_owner_account_id = U256::from(2); // + +// SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 1000000000000000); + +// let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( +// netuid, +// block_number, +// 129123813, +// &hotkey_account_id, +// ); + +// //add network +// add_network(netuid, tempo, 0); + +// assert_ok!(SubtensorModule::register( +// <::RuntimeOrigin>::signed(hotkey_account_id), +// netuid, +// block_number, +// nonce, +// 
work.clone(), +// hotkey_account_id, +// coldkey_account_id +// )); + +// assert!(SubtensorModule::if_subnet_exist(netuid)); + +// // the account is not network owner when schedule the call +// assert_ok!(SubtensorModule::schedule_swap_coldkey( +// <::RuntimeOrigin>::signed(coldkey_account_id), +// new_network_owner_account_id +// )); + +// let current_block = System::block_number(); +// let execution_block = current_block + ColdkeySwapScheduleDuration::::get(); + +// run_to_block(execution_block - 1); + +// // the account is not network owner when schedule the call +// assert_ok!(SubtensorModule::schedule_dissolve_network( +// <::RuntimeOrigin>::signed(new_network_owner_account_id), +// netuid +// )); + +// System::assert_last_event( +// Event::DissolveNetworkScheduled { +// account: new_network_owner_account_id, +// netuid, +// execution_block: DissolveNetworkScheduleDuration::::get() + execution_block +// - 1, +// } +// .into(), +// ); + +// run_to_block(execution_block); +// assert_eq!( +// crate::SubnetOwner::::get(netuid), +// new_network_owner_account_id +// ); + +// let current_block = System::block_number(); +// let execution_block = current_block + DissolveNetworkScheduleDuration::::get(); + +// run_to_block(execution_block); +// // network exists since the caller is no the network owner +// assert!(!SubtensorModule::if_subnet_exist(netuid)); +// }) +// } // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::networks::test_register_subnet_low_lock_cost --exact --show-output --nocapture #[test] diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs new file mode 100644 index 0000000000..b142e5d3c9 --- /dev/null +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -0,0 +1,559 @@ +use approx::assert_abs_diff_eq; +use frame_support::{assert_noop, assert_ok, traits::Currency}; +use sp_core::U256; + +use super::mock::*; +use crate::*; + +#[test] +fn test_recycle_success() { + 
new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < initial_alpha + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + 
SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + 
register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + // Verify the ownership + assert_ne!( + SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey), + other_coldkey + ); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle from nominator coldkey + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_burn_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and 
hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. 
+ // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from nominator coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. 
+ increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_errors() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_hotkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + 
Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + wrong_hotkey, + 100_000, + netuid + ), + Error::::HotKeyAccountNotExists + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + + // make it pass the stake check + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), + Error::::InsufficientLiquidity + ); + }); +} + +#[test] +fn test_burn_errors() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_hotkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 
SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + wrong_hotkey, + 100_000, + netuid + ), + Error::::HotKeyAccountNotExists + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + + // make it pass the hotkey alpha check + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), + Error::::InsufficientLiquidity + ); + }); +} diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 50d409561d..1ae16d95c0 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -1,5 +1,6 @@ #![allow(clippy::unwrap_used)] +use approx::assert_abs_diff_eq; use frame_support::traits::Currency; use super::mock::*; @@ -535,11 +536,11 @@ fn test_burn_adjustment() { new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let tempo: u16 = 13; - let burn_cost: u64 = 1000; + let init_burn_cost: u64 = InitialMinBurn::get() + 10_000; let adjustment_interval = 1; let target_registrations_per_interval = 1; add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); + SubtensorModule::set_burn(netuid, init_burn_cost); SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. SubtensorModule::set_target_registrations_per_interval( @@ -550,7 +551,7 @@ fn test_burn_adjustment() { // Register key 1. 
let hotkey_account_id_1 = U256::from(1); let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_1), netuid, @@ -560,7 +561,7 @@ fn test_burn_adjustment() { // Register key 2. let hotkey_account_id_2 = U256::from(2); let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_2), netuid, @@ -571,8 +572,13 @@ fn test_burn_adjustment() { // Step the block and trigger the adjustment. step_block(1); - // Check the adjusted burn. - assert_eq!(SubtensorModule::get_burn_as_u64(netuid), 1500); + // Check the adjusted burn is above the initial min burn. 
+ assert!(SubtensorModule::get_burn_as_u64(netuid) > init_burn_cost); + assert_abs_diff_eq!( + SubtensorModule::get_burn_as_u64(netuid), + init_burn_cost.saturating_mul(3).saturating_div(2), // 1.5x + epsilon = 1000 + ); }); } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 5d9db9f4e8..8fe5964311 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -3,6 +3,8 @@ use frame_support::{assert_err, assert_noop, assert_ok, traits::Currency}; use frame_system::RawOrigin; +use safe_math::FixedExt; +use substrate_fixed::traits::FromFixed; use super::mock::*; use crate::*; @@ -10,7 +12,7 @@ use approx::assert_abs_diff_eq; use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}; use frame_support::sp_runtime::DispatchError; use sp_core::{Get, H256, U256}; -use substrate_fixed::types::{I96F32, U64F64, U96F32}; +use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; /*********************************************************** staking::add_stake() tests @@ -57,8 +59,11 @@ fn test_add_stake_ok_no_emission() { 0 ); - // Also total stake should be zero - assert_eq!(SubtensorModule::get_total_stake(), 0); + // Also total stake should be equal to the network initial lock + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); // Transfer to hotkey account, and check if the result is ok assert_ok!(SubtensorModule::add_stake( @@ -79,7 +84,10 @@ fn test_add_stake_ok_no_emission() { assert_eq!(SubtensorModule::get_coldkey_balance(&coldkey_account_id), 1); // Check if total stake has increased accordingly. 
- assert_eq!(SubtensorModule::get_total_stake(), amount); + assert_eq!( + SubtensorModule::get_total_stake(), + amount + SubtensorModule::get_network_min_lock() + ); }); } @@ -353,12 +361,14 @@ fn test_remove_stake_ok_no_emission() { let coldkey_account_id = U256::from(4343); let hotkey_account_id = U256::from(4968585); let amount = DefaultMinStake::::get() * 10; - let fee = DefaultStakingFee::::get(); let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Some basic assertions - assert_eq!(SubtensorModule::get_total_stake(), 0); + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 @@ -372,6 +382,16 @@ fn test_remove_stake_ok_no_emission() { netuid, amount, ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), + amount + ); + + // Add subnet TAO for the equivalent amount added at price + let amount_tao = + U96F32::saturating_from_num(amount) * SubtensorModule::get_alpha_price(netuid); + SubnetTAO::::mutate(netuid, |v| *v += amount_tao.saturating_to_num::()); + TotalStake::::mutate(|v| *v += amount_tao.saturating_to_num::()); // Do the magic assert_ok!(SubtensorModule::remove_stake( @@ -381,13 +401,24 @@ fn test_remove_stake_ok_no_emission() { amount )); + let fee = SubtensorModule::calculate_staking_fee( + Some((&hotkey_account_id, netuid)), + &coldkey_account_id, + None, + &coldkey_account_id, + U96F32::saturating_from_num(amount), + ); + // we do not expect the exact amount due to slippage assert!(SubtensorModule::get_coldkey_balance(&coldkey_account_id) > amount / 10 * 9 - fee); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 ); - assert_eq!(SubtensorModule::get_total_stake(), fee); + assert_eq!( + SubtensorModule::get_total_stake(), + 
SubtensorModule::get_network_min_lock() + fee + ); }); } @@ -403,7 +434,10 @@ fn test_remove_stake_amount_too_low() { register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Some basic assertions - assert_eq!(SubtensorModule::get_total_stake(), 0); + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 @@ -510,12 +544,14 @@ fn test_remove_stake_total_balance_no_change() { let hotkey_account_id = U256::from(571337); let coldkey_account_id = U256::from(71337); let amount = DefaultMinStake::::get() * 10; - let fee = DefaultStakingFee::::get(); let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Some basic assertions - assert_eq!(SubtensorModule::get_total_stake(), 0); + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 @@ -532,6 +568,12 @@ fn test_remove_stake_total_balance_no_change() { amount, ); + // Add subnet TAO for the equivalent amount added at price + let amount_tao = + U96F32::saturating_from_num(amount) * SubtensorModule::get_alpha_price(netuid); + SubnetTAO::::mutate(netuid, |v| *v += amount_tao.saturating_to_num::()); + TotalStake::::mutate(|v| *v += amount_tao.saturating_to_num::()); + // Do the magic assert_ok!(SubtensorModule::remove_stake( RuntimeOrigin::signed(coldkey_account_id), @@ -540,6 +582,13 @@ fn test_remove_stake_total_balance_no_change() { amount )); + let fee = SubtensorModule::calculate_staking_fee( + Some((&hotkey_account_id, netuid)), + &coldkey_account_id, + None, + &coldkey_account_id, + U96F32::saturating_from_num(amount), + ); assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), amount - fee, @@ -549,7 +598,10 @@ fn 
test_remove_stake_total_balance_no_change() { SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 ); - assert_eq!(SubtensorModule::get_total_stake(), fee); + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + fee + ); // Check total balance is equal to the added stake. Even after remove stake (no fee, includes reserved/locked balance) let total_balance = Balances::total_balance(&coldkey_account_id); @@ -648,7 +700,10 @@ fn test_remove_stake_total_issuance_no_change() { SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); // Some basic assertions - assert_eq!(SubtensorModule::get_total_stake(), 0); + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), 0 @@ -697,7 +752,7 @@ fn test_remove_stake_total_issuance_no_change() { ); assert_abs_diff_eq!( SubtensorModule::get_total_stake(), - fee * 2, + fee * 2 + SubtensorModule::get_network_min_lock(), epsilon = fee / 1000 ); @@ -718,6 +773,170 @@ fn test_remove_stake_total_issuance_no_change() { }); } +// cargo test --package pallet-subtensor --lib -- tests::staking::test_remove_prev_epoch_stake --exact --show-output --nocapture +#[test] +fn test_remove_prev_epoch_stake() { + new_test_ext(1).execute_with(|| { + let def_fee = DefaultStakingFee::::get(); + + // Test case: (amount_to_stake, AlphaDividendsPerSubnet, TotalHotkeyAlphaLastEpoch, expected_fee) + [ + // No previous epoch stake and low hotkey stake + ( + DefaultMinStake::::get() * 10, + 0_u64, + 1000_u64, + def_fee * 2, + ), + // Same, but larger amount to stake - we get 0.005% for unstake + ( + 1_000_000_000, + 0_u64, + 1000_u64, + (1_000_000_000_f64 * 0.00005) as u64 + def_fee, + ), + ( + 100_000_000_000, + 0_u64, + 1000_u64, + (100_000_000_000_f64 * 0.00005) as u64 + def_fee, + ), + // Lower previous epoch stake than current stake + // Staking/unstaking 100 TAO, 
divs / total = 0.1 => fee is 1 TAO + ( + 100_000_000_000, + 1_000_000_000_u64, + 10_000_000_000_u64, + (100_000_000_000_f64 * 0.1) as u64 + def_fee, + ), + // Staking/unstaking 100 TAO, divs / total = 0.001 => fee is 0.01 TAO + ( + 100_000_000_000, + 10_000_000_u64, + 10_000_000_000_u64, + (100_000_000_000_f64 * 0.001) as u64 + def_fee, + ), + // Higher previous epoch stake than current stake + ( + 1_000_000_000, + 100_000_000_000_u64, + 100_000_000_000_000_u64, + (1_000_000_000_f64 * 0.001) as u64 + def_fee, + ), + ] + .iter() + .for_each( + |(amount_to_stake, alpha_divs, hotkey_alpha, expected_fee)| { + let subnet_owner_coldkey = U256::from(1); + let subnet_owner_hotkey = U256::from(2); + let hotkey_account_id = U256::from(581337); + let coldkey_account_id = U256::from(81337); + let amount = *amount_to_stake; + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); + AlphaDividendsPerSubnet::::insert(netuid, hotkey_account_id, *alpha_divs); + TotalHotkeyAlphaLastEpoch::::insert(hotkey_account_id, netuid, *hotkey_alpha); + let balance_before = SubtensorModule::get_coldkey_balance(&coldkey_account_id); + + // Stake to hotkey account, and check if the result is ok + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount + )); + + // Remove all stake + let stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey_account_id, + &coldkey_account_id, + netuid, + ); + + assert_ok!(SubtensorModule::remove_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + stake + )); + + // Measure actual fee + let balance_after = SubtensorModule::get_coldkey_balance(&coldkey_account_id); + let actual_fee = balance_before - 
balance_after; + + assert_abs_diff_eq!(actual_fee, *expected_fee, epsilon = *expected_fee / 100,); + }, + ); + }); +} + +// cargo test --package pallet-subtensor --lib -- tests::staking::test_staking_sets_div_variables --exact --show-output --nocapture +#[test] +fn test_staking_sets_div_variables() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1); + let subnet_owner_hotkey = U256::from(2); + let hotkey_account_id = U256::from(581337); + let coldkey_account_id = U256::from(81337); + let amount = 100_000_000_000; + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let tempo = 10; + Tempo::::insert(netuid, tempo); + register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, amount); + + // Verify that divident variables are clear in the beginning + assert_eq!( + AlphaDividendsPerSubnet::::get(netuid, hotkey_account_id), + 0 + ); + assert_eq!( + TotalHotkeyAlphaLastEpoch::::get(hotkey_account_id, netuid), + 0 + ); + + // Stake to hotkey account, and check if the result is ok + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey_account_id), + hotkey_account_id, + netuid, + amount + )); + + // Verify that divident variables are still clear in the beginning + assert_eq!( + AlphaDividendsPerSubnet::::get(netuid, hotkey_account_id), + 0 + ); + assert_eq!( + TotalHotkeyAlphaLastEpoch::::get(hotkey_account_id, netuid), + 0 + ); + + // Wait for 1 epoch + step_block(tempo + 1); + + // Verify that divident variables have been set + let stake = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey_account_id, + &coldkey_account_id, + netuid, + ); + + assert!(AlphaDividendsPerSubnet::::get(netuid, hotkey_account_id) > 0); + assert_abs_diff_eq!( + TotalHotkeyAlphaLastEpoch::::get(hotkey_account_id, netuid), + stake, + epsilon = stake / 100_000 
+ ); + }); +} + /*********************************************************** staking::get_coldkey_balance() tests ************************************************************/ @@ -762,8 +981,11 @@ fn test_add_stake_to_hotkey_account_ok() { let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); register_ok_neuron(netuid, hotkey_id, coldkey_id, 192213123); - // There is not stake in the system at first, so result should be 0; - assert_eq!(SubtensorModule::get_total_stake(), 0); + // There is no stake in the system at first, other than the network initial lock so result; + assert_eq!( + SubtensorModule::get_total_stake(), + SubtensorModule::get_network_min_lock() + ); SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( &hotkey_id, @@ -2219,11 +2441,75 @@ fn test_remove_stake_fee_goes_to_subnet_tao() { }); } +// cargo test --package pallet-subtensor --lib -- tests::staking::test_remove_stake_fee_realistic_values --exact --show-output --nocapture #[test] -fn test_stake_below_min_validate() { - // Testing the signed extension validate function - // correctly filters the `add_stake` transaction. +fn test_remove_stake_fee_realistic_values() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let hotkey = U256::from(2); + let coldkey = U256::from(3); + let alpha_to_unstake = 111_180_000_000; + let alpha_divs = 2_816_190; + + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + + // Mock a realistic scenario: + // Subnet 1 has 3896 TAO and 128_011 Alpha in reserves, which + // makes its price ~0.03. + // A hotkey has 111 Alpha stake and is unstaking all Alpha. 
+ // Alpha dividends of this hotkey are ~0.0028 + // This makes fee be equal ~0.0028 Alpha ~= 84000 rao + let tao_reserve: U96F32 = U96F32::from_num(3_896_056_559_708_u64); + let alpha_in: U96F32 = U96F32::from_num(128_011_331_299_964_u64); + SubnetTAO::::insert(netuid, tao_reserve.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_in.to_num::()); + AlphaDividendsPerSubnet::::insert(netuid, hotkey, alpha_divs); + TotalHotkeyAlphaLastEpoch::::insert(hotkey, netuid, alpha_to_unstake); + + // Add stake first time to init TotalHotkeyAlpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + alpha_to_unstake, + ); + + // Remove stake to measure fee + let balance_before = SubtensorModule::get_coldkey_balance(&coldkey); + let expected_tao_no_fee = + SubtensorModule::sim_swap_alpha_for_tao(netuid, alpha_to_unstake).unwrap(); + + // Estimate fees + let mut expected_fee = + expected_tao_no_fee as f64 * alpha_divs as f64 / alpha_to_unstake as f64; + if expected_fee < expected_tao_no_fee as f64 * 0.00005 { + expected_fee = expected_tao_no_fee as f64 * 0.00005; + } + + assert_ok!(SubtensorModule::remove_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + alpha_to_unstake + )); + + // Calculate expected fee + let balance_after = SubtensorModule::get_coldkey_balance(&coldkey); + let actual_fee = expected_tao_no_fee as f64 - (balance_after - balance_before) as f64; + log::info!("Actual fee: {:?}", actual_fee); + assert_abs_diff_eq!( + actual_fee as u64, + expected_fee as u64, + epsilon = expected_fee as u64 / 1000 + ); + }); +} + +#[test] +fn test_stake_below_min_validate() { new_test_ext(0).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); @@ -2437,7 +2723,11 @@ fn test_stake_overflow() { ); // Check if total stake has increased accordingly. 
- assert_abs_diff_eq!(SubtensorModule::get_total_stake(), amount, epsilon = 10); + assert_abs_diff_eq!( + SubtensorModule::get_total_stake(), + amount + SubtensorModule::get_network_min_lock(), + epsilon = 10 + ); }); } @@ -3403,8 +3693,11 @@ fn test_add_stake_limit_ok() { // Check that price has updated to ~24 = (150+450) / (100 - 75) let exp_price = U96F32::from_num(24.0); let current_price: U96F32 = U96F32::from_num(SubtensorModule::get_alpha_price(netuid)); - assert!(exp_price.saturating_sub(current_price) < 0.0001); - assert!(current_price.saturating_sub(exp_price) < 0.0001); + assert_abs_diff_eq!( + exp_price.to_num::(), + current_price.to_num::(), + epsilon = 0.0001, + ); }); } @@ -3783,7 +4076,7 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { let coldkey_account_id = U256::from(81337); let amount = 10_000_000_000; let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); - let fee = DefaultStakingFee::::get(); + let mut fee = DefaultStakingFee::::get(); register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Give it some $$$ in his coldkey balance @@ -3803,18 +4096,20 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { &coldkey_account_id, netuid, ); + let remove_amount = (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::(); assert_ok!(SubtensorModule::remove_stake( RuntimeOrigin::signed(coldkey_account_id), hotkey_account_id, netuid, - (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::() + remove_amount, )); // Check that all alpha was unstaked and all TAO balance was returned (less fees) + fee = fee + fee.max((remove_amount as f64 * 0.00005) as u64); assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), - amount - fee * 2, - epsilon = 10000, + amount - fee, + epsilon = 100000, ); assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&hotkey_account_id), @@ -3885,3 +4180,254 @@ fn test_remove_99_9989_per_cent_stake_leaves_a_little() { 
assert_abs_diff_eq!(new_alpha, (alpha as f64 * 0.01) as u64, epsilon = 10); }); } + +#[test] +fn test_move_stake_limit_partial() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let stake_amount = 150_000_000_000; + let move_amount = 150_000_000_000; + + // add network + let origin_netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let destination_netuid: u16 = + add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(origin_netuid, hotkey, coldkey, 192213123); + register_ok_neuron(destination_netuid, hotkey, coldkey, 192213123); + + // Give the neuron some stake to remove + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + origin_netuid, + stake_amount, + ); + + // Forse-set alpha in and tao reserve to make price equal 1.5 on both origin and destination, + // but there's much more liquidity on destination, so its price wouldn't go up when restaked + let tao_reserve: U96F32 = U96F32::from_num(150_000_000_000_u64); + let alpha_in: U96F32 = U96F32::from_num(100_000_000_000_u64); + SubnetTAO::::insert(origin_netuid, tao_reserve.to_num::()); + SubnetAlphaIn::::insert(origin_netuid, alpha_in.to_num::()); + SubnetTAO::::insert(destination_netuid, (tao_reserve * 100_000).to_num::()); + SubnetAlphaIn::::insert(destination_netuid, (alpha_in * 100_000).to_num::()); + let current_price: U96F32 = + U96F32::from_num(SubtensorModule::get_alpha_price(origin_netuid)); + assert_eq!(current_price, U96F32::from_num(1.5)); + + // The relative price between origin and destination subnets is 1. 
+ // Setup limit relative price so that it doesn't drop by more than 1% from current price + let limit_price = 990_000_000; + + // Move stake with slippage safety - executes partially + assert_ok!(SubtensorModule::swap_stake_limit( + RuntimeOrigin::signed(coldkey), + hotkey, + origin_netuid, + destination_netuid, + move_amount, + limit_price, + true, + )); + + let new_alpha = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + origin_netuid, + ); + + assert_abs_diff_eq!(new_alpha, 149_000_000_000, epsilon = 100_000_000,); + }); +} + +#[test] +fn test_unstake_all_hits_liquidity_min() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let stake_amount = 190_000_000_000; // 190 Alpha + + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(netuid, hotkey, coldkey, 192213123); + // Give the neuron some stake to remove + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // Setup the Alpha pool so that removing all the Alpha will bring liqudity below the minimum + let remaining_tao: I96F32 = + DefaultMinimumPoolLiquidity::::get().saturating_sub(I96F32::from(1)); + let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); + let alpha = stake_amount; + + let k: I110F18 = I110F18::from_fixed(remaining_tao) + .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); + let tao_reserves: I110F18 = k.safe_div(alpha_reserves); + + SubnetTAO::::insert(netuid, tao_reserves.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); + + // Try to unstake, but we reduce liquidity too far + + assert_ok!(SubtensorModule::unstake_all( + RuntimeOrigin::signed(coldkey), + hotkey, + )); + + // Expect nothing to be unstaked + let new_alpha = + 
SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + assert_abs_diff_eq!(new_alpha, stake_amount, epsilon = 0,); + }); +} + +#[test] +fn test_unstake_all_alpha_hits_liquidity_min() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let stake_amount = 190_000_000_000; // 190 Alpha + + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(netuid, hotkey, coldkey, 192213123); + // Give the neuron some stake to remove + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // Setup the Alpha pool so that removing all the Alpha will bring liqudity below the minimum + let remaining_tao: I96F32 = + DefaultMinimumPoolLiquidity::::get().saturating_sub(I96F32::from(1)); + let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); + let alpha = stake_amount; + + let k: I110F18 = I110F18::from_fixed(remaining_tao) + .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); + let tao_reserves: I110F18 = k.safe_div(alpha_reserves); + + SubnetTAO::::insert(netuid, tao_reserves.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); + + // Try to unstake, but we reduce liquidity too far + + assert_ok!(SubtensorModule::unstake_all_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + )); + + // Expect nothing to be unstaked + let new_alpha = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + assert_abs_diff_eq!(new_alpha, stake_amount, epsilon = 0,); + }); +} + +#[test] +fn test_unstake_all_alpha_works() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let 
stake_amount = 190_000_000_000; // 190 Alpha + + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(netuid, hotkey, coldkey, 192213123); + // Give the neuron some stake to remove + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // Setup the Alpha pool so that removing all the Alpha will keep liq above min + let remaining_tao: I96F32 = + DefaultMinimumPoolLiquidity::::get().saturating_add(I96F32::from(10_000_000)); + let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); + let alpha = stake_amount; + + let k: I110F18 = I110F18::from_fixed(remaining_tao) + .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); + let tao_reserves: I110F18 = k.safe_div(alpha_reserves); + + SubnetTAO::::insert(netuid, tao_reserves.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); + + // Unstake all alpha to root + assert_ok!(SubtensorModule::unstake_all_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + )); + + let new_alpha = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + assert_abs_diff_eq!(new_alpha, 0, epsilon = 1_000,); + let new_root = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, 0); + assert!(new_root > 100_000); + }); +} + +#[test] +fn test_unstake_all_works() { + new_test_ext(1).execute_with(|| { + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let stake_amount = 190_000_000_000; // 190 Alpha + + let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + register_ok_neuron(netuid, hotkey, coldkey, 192213123); + // Give the neuron some stake to remove + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + stake_amount, + ); + + // 
Setup the Alpha pool so that removing all the Alpha will keep liq above min + let remaining_tao: I96F32 = + DefaultMinimumPoolLiquidity::::get().saturating_add(I96F32::from(10_000_000)); + let alpha_reserves: I110F18 = I110F18::from(stake_amount + 10_000_000); + let alpha = stake_amount; + + let k: I110F18 = I110F18::from_fixed(remaining_tao) + .saturating_mul(alpha_reserves.saturating_add(I110F18::from(alpha))); + let tao_reserves: I110F18 = k.safe_div(alpha_reserves); + + SubnetTAO::::insert(netuid, tao_reserves.to_num::()); + SubnetAlphaIn::::insert(netuid, alpha_reserves.to_num::()); + + // Unstake all alpha to root + assert_ok!(SubtensorModule::unstake_all( + RuntimeOrigin::signed(coldkey), + hotkey, + )); + + let new_alpha = + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); + assert_abs_diff_eq!(new_alpha, 0, epsilon = 1_000,); + let new_balance = SubtensorModule::get_coldkey_balance(&coldkey); + assert!(new_balance > 100_000); + }); +} diff --git a/pallets/subtensor/src/tests/staking2.rs b/pallets/subtensor/src/tests/staking2.rs index d4c1145435..6fbabf83b2 100644 --- a/pallets/subtensor/src/tests/staking2.rs +++ b/pallets/subtensor/src/tests/staking2.rs @@ -6,7 +6,7 @@ use frame_support::{ weights::Weight, }; use sp_core::U256; -use substrate_fixed::types::I96F32; +use substrate_fixed::types::{I96F32, U96F32}; // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --workspace --test staking2 -- test_swap_tao_for_alpha_dynamic_mechanism --exact --nocapture #[test] @@ -623,3 +623,326 @@ fn test_try_associate_hotkey() { assert_eq!(SubtensorModule::get_owned_hotkeys(&coldkey2).len(), 0); }); } + +#[test] +fn test_stake_fee_api() { + // The API should match the calculation + new_test_ext(1).execute_with(|| { + let hotkey1 = U256::from(1); + let coldkey1 = U256::from(2); + let hotkey2 = U256::from(3); + let coldkey2 = U256::from(4); + + let netuid0 = 1; + let netuid1 = 2; + let root_netuid = SubtensorModule::get_root_netuid(); + + 
let alpha_divs = 100_000_000_000; + let total_hotkey_alpha = 100_000_000_000; + let tao_in = 100_000_000_000; // 100 TAO + let reciprocal_price = 2; // 1 / price + let stake_amount = 100_000_000_000; + + // Setup alpha out + SubnetAlphaOut::::insert(netuid0, 100_000_000_000); + SubnetAlphaOut::::insert(netuid1, 100_000_000_000); + // Set pools using price + SubnetAlphaIn::::insert(netuid0, tao_in * reciprocal_price); + SubnetTAO::::insert(netuid0, tao_in); + SubnetAlphaIn::::insert(netuid1, tao_in * reciprocal_price); + SubnetTAO::::insert(netuid1, tao_in); + + // Setup alpha divs for hotkey1 + AlphaDividendsPerSubnet::::insert(netuid0, hotkey1, alpha_divs); + AlphaDividendsPerSubnet::::insert(netuid1, hotkey1, alpha_divs); + + // Setup total hotkey alpha for hotkey1 + TotalHotkeyAlpha::::insert(hotkey1, netuid0, total_hotkey_alpha); + TotalHotkeyAlpha::::insert(hotkey1, netuid1, total_hotkey_alpha); + + // Test stake fee for add_stake + let stake_fee_0 = SubtensorModule::get_stake_fee( + None, + coldkey1, + Some((hotkey1, netuid0)), + coldkey1, + stake_amount, + ); + let dynamic_fee_0 = SubtensorModule::calculate_staking_fee( + None, + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_0, dynamic_fee_0); + + // Test stake fee for remove on root + let stake_fee_1 = SubtensorModule::get_stake_fee( + Some((hotkey1, root_netuid)), + coldkey1, + None, + coldkey1, + stake_amount, + ); + let dynamic_fee_1 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + None, + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_1, dynamic_fee_1); + + // Test stake fee for move from root to non-root + let stake_fee_2 = SubtensorModule::get_stake_fee( + Some((hotkey1, root_netuid)), + coldkey1, + Some((hotkey1, netuid0)), + coldkey1, + stake_amount, + ); + let dynamic_fee_2 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, 
root_netuid)), + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_2, dynamic_fee_2); + + // Test stake fee for move between hotkeys on root + let stake_fee_3 = SubtensorModule::get_stake_fee( + Some((hotkey1, root_netuid)), + coldkey1, + Some((hotkey2, root_netuid)), + coldkey1, + stake_amount, + ); + let dynamic_fee_3 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + Some((&hotkey2, root_netuid)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_3, dynamic_fee_3); + + // Test stake fee for move between coldkeys on root + let stake_fee_4 = SubtensorModule::get_stake_fee( + Some((hotkey1, root_netuid)), + coldkey1, + Some((hotkey1, root_netuid)), + coldkey2, + stake_amount, + ); + let dynamic_fee_4 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + Some((&hotkey1, root_netuid)), + &coldkey2, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_4, dynamic_fee_4); + + // Test stake fee for *swap* from non-root to root + let stake_fee_5 = SubtensorModule::get_stake_fee( + Some((hotkey1, netuid0)), + coldkey1, + Some((hotkey1, root_netuid)), + coldkey1, + stake_amount, + ); + let dynamic_fee_5 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, root_netuid)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_5, dynamic_fee_5); + + // Test stake fee for move between hotkeys on non-root + let stake_fee_6 = SubtensorModule::get_stake_fee( + Some((hotkey1, netuid0)), + coldkey1, + Some((hotkey2, netuid0)), + coldkey1, + stake_amount, + ); + let dynamic_fee_6 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey2, netuid0)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_6, dynamic_fee_6); + + // 
Test stake fee for move between coldkeys on non-root + let stake_fee_7 = SubtensorModule::get_stake_fee( + Some((hotkey1, netuid0)), + coldkey1, + Some((hotkey1, netuid0)), + coldkey2, + stake_amount, + ); + let dynamic_fee_7 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey2, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_7, dynamic_fee_7); + + // Test stake fee for *swap* from non-root to non-root + let stake_fee_8 = SubtensorModule::get_stake_fee( + Some((hotkey1, netuid0)), + coldkey1, + Some((hotkey1, netuid1)), + coldkey1, + stake_amount, + ); + let dynamic_fee_8 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid1)), + &coldkey1, + U96F32::saturating_from_num(stake_amount), + ); + assert_eq!(stake_fee_8, dynamic_fee_8); + }); +} + +#[test] +fn test_stake_fee_calculation() { + new_test_ext(1).execute_with(|| { + let hotkey1 = U256::from(1); + let coldkey1 = U256::from(2); + let hotkey2 = U256::from(3); + let coldkey2 = U256::from(4); + + let netuid0 = 1; + let netuid1 = 2; + let root_netuid = SubtensorModule::get_root_netuid(); + // Set SubnetMechanism to 1 (Dynamic) + SubnetMechanism::::insert(netuid0, 1); + SubnetMechanism::::insert(netuid1, 1); + + let alpha_divs = 100_000_000_000; + let total_hotkey_alpha = 100_000_000_000; + let tao_in = 100_000_000_000; // 100 TAO + let reciprocal_price = 2; // 1 / price + let stake_amount = 100_000_000_000_u64; + + let default_fee = DefaultStakingFee::::get(); + + // Setup alpha out + SubnetAlphaOut::::insert(netuid0, 100_000_000_000); + SubnetAlphaOut::::insert(netuid1, 100_000_000_000); + // Set pools using price + SubnetAlphaIn::::insert(netuid0, tao_in * reciprocal_price); + SubnetTAO::::insert(netuid0, tao_in); + SubnetAlphaIn::::insert(netuid1, tao_in * reciprocal_price); + SubnetTAO::::insert(netuid1, tao_in); + + // Setup alpha divs for hotkey1 + 
AlphaDividendsPerSubnet::::insert(netuid0, hotkey1, alpha_divs); + AlphaDividendsPerSubnet::::insert(netuid1, hotkey1, alpha_divs); + + // Setup total hotkey alpha for hotkey1 + TotalHotkeyAlpha::::insert(hotkey1, netuid0, total_hotkey_alpha); + TotalHotkeyAlpha::::insert(hotkey1, netuid1, total_hotkey_alpha); + + // Test stake fee for add_stake + let stake_fee_0 = SubtensorModule::calculate_staking_fee( + None, + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Default for adding stake + assert_eq!(stake_fee_0, default_fee); + + // Test stake fee for remove on root + let stake_fee_1 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + None, + &coldkey1, + U96F32::from_num(stake_amount), + ); // Default for removing stake from root + assert_eq!(stake_fee_1, default_fee); + + // Test stake fee for move from root to non-root + let stake_fee_2 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Default for moving stake from root to non-root + assert_eq!(stake_fee_2, default_fee); + + // Test stake fee for move between hotkeys on root + let stake_fee_3 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + Some((&hotkey2, root_netuid)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Default for moving stake between hotkeys on root + assert_eq!(stake_fee_3, default_fee); + + // Test stake fee for move between coldkeys on root + let stake_fee_4 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, root_netuid)), + &coldkey1, + Some((&hotkey1, root_netuid)), + &coldkey2, + U96F32::from_num(stake_amount), + ); // Default for moving stake between coldkeys on root + assert_eq!(stake_fee_4, default_fee); + + // Test stake fee for *swap* from non-root to root + let stake_fee_5 = SubtensorModule::calculate_staking_fee( + 
Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, root_netuid)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Charged a dynamic fee + assert_ne!(stake_fee_5, default_fee); + + // Test stake fee for move between hotkeys on non-root + let stake_fee_6 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey2, netuid0)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Charge the default fee + assert_eq!(stake_fee_6, default_fee); + + // Test stake fee for move between coldkeys on non-root + let stake_fee_7 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey2, + U96F32::from_num(stake_amount), + ); // Charge the default fee; stake did not leave the subnet. + assert_eq!(stake_fee_7, default_fee); + + // Test stake fee for *swap* from non-root to non-root + let stake_fee_8 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid1)), + &coldkey1, + U96F32::from_num(stake_amount), + ); // Charged a dynamic fee + assert_ne!(stake_fee_8, default_fee); + }); +} diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs new file mode 100644 index 0000000000..4ceeaab897 --- /dev/null +++ b/pallets/subtensor/src/tests/subnet.rs @@ -0,0 +1,261 @@ +use super::mock::*; +use crate::*; +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; + +/*************************** + pub fn do_start_call() tests +*****************************/ + +#[test] +fn test_do_start_call_ok() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + 
assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_not_existed_subnet() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey_account_id = U256::from(0); + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_do_start_call_fail_not_owner() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let wrong_owner_account_id = U256::from(2); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + System::set_block_number(System::block_number() + DurationOfStartCall::get()); + + assert_noop!( + SubtensorModule::start_call( + 
<::RuntimeOrigin>::signed(wrong_owner_account_id), + netuid + ), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_cannot_start_call_now() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::NeedWaitingMoreBlocksToStarCall + ); + }); +} + +#[test] +fn test_do_start_call_fail_for_set_again() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + 
<::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::FirstEmissionBlockNumberAlreadySet + ); + }); +} + +#[test] +fn test_do_start_call_ok_with_same_block_number_after_coinbase() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) + ); + + step_block(tempo); + match FirstEmissionBlockNumber::::get(netuid) { + Some(new_emission_block_number) => { + assert_eq!(new_emission_block_number, block_number + 1) + } + None => assert!(FirstEmissionBlockNumber::::get(netuid).is_some()), + } + }); +} + +#[test] +fn test_register_network_min_burn_at_default() { + new_test_ext(1).execute_with(|| { + let sn_owner_coldkey = U256::from(0); + let sn_owner_hotkey = U256::from(1); + let cost = SubtensorModule::get_network_lock_cost(); + + // Give coldkey enough for lock + SubtensorModule::add_balance_to_coldkey_account(&sn_owner_coldkey, cost + 10_000_000_000); + + // Register 
network + assert_ok!(SubtensorModule::register_network( + <::RuntimeOrigin>::signed(sn_owner_coldkey), + sn_owner_hotkey + )); + // Get last events + let events = System::events(); + let min_burn_event = events + .iter() + .filter(|event| { + matches!( + event.event, + RuntimeEvent::SubtensorModule(Event::::NetworkAdded(..)) + ) + }) + .last(); + + let netuid = match min_burn_event.map(|event| event.event.clone()) { + Some(RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _))) => netuid, + _ => panic!("Expected NetworkAdded event"), + }; + + // Check min burn is set to default + assert_eq!(MinBurn::::get(netuid), InitialMinBurn::get()); + }); +} diff --git a/pallets/subtensor/src/tests/swap_coldkey.rs b/pallets/subtensor/src/tests/swap_coldkey.rs index 8f7b024e4c..beb4df59a5 100644 --- a/pallets/subtensor/src/tests/swap_coldkey.rs +++ b/pallets/subtensor/src/tests/swap_coldkey.rs @@ -14,6 +14,7 @@ use frame_support::traits::schedule::DispatchTime; use frame_support::traits::schedule::v3::Named as ScheduleNamed; use sp_core::{Get, H256, U256}; use sp_runtime::DispatchError; +use substrate_fixed::types::U96F32; // // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_total_hotkey_coldkey_stakes_this_interval --exact --nocapture // #[test] @@ -537,7 +538,15 @@ fn test_swap_concurrent_modifications() { let netuid: u16 = 1; let initial_stake = 1_000_000_000_000; let additional_stake = 500_000_000_000; - let fee = DefaultStakingFee::::get(); + let initial_stake_alpha = + U96F32::from(initial_stake).saturating_mul(SubtensorModule::get_alpha_price(netuid)); + let fee = SubtensorModule::calculate_staking_fee( + None, + &new_coldkey, + Some((&hotkey, netuid)), + &new_coldkey, + initial_stake_alpha, + ); // Setup initial state add_network(netuid, 1, 1); @@ -588,7 +597,6 @@ fn test_swap_concurrent_modifications() { &mut weight )); - let eps = 500; // RAO assert_abs_diff_eq!( SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( 
&hotkey, @@ -596,7 +604,7 @@ fn test_swap_concurrent_modifications() { netuid ), stake_before_swap + additional_stake - fee, - epsilon = eps + epsilon = (stake_before_swap + additional_stake - fee) / 1000 ); assert!(!Alpha::::contains_key((hotkey, old_coldkey, netuid))); }); diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index dab1675074..a82972c2f7 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -116,56 +116,6 @@ fn test_swap_total_hotkey_stake() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_last_tx_block --exact --nocapture -#[test] -fn test_swap_last_tx_block() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - LastTxBlock::::insert(old_hotkey, 1000); - assert_ok!(SubtensorModule::perform_hotkey_swap( - &old_hotkey, - &new_hotkey, - &coldkey, - &mut weight - )); - - assert!(!LastTxBlock::::contains_key(old_hotkey)); - assert_eq!( - LastTxBlock::::get(new_hotkey), - SubtensorModule::get_current_block_as_u64() - ); - }); -} - -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_last_tx_block_delegate_take --exact --nocapture -#[test] -fn test_swap_last_tx_block_delegate_take() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - crate::LastTxBlockDelegateTake::::insert(old_hotkey, 1000); - assert_ok!(SubtensorModule::perform_hotkey_swap( - &old_hotkey, - &new_hotkey, - &coldkey, - &mut weight - )); - - assert!(!LastTxBlockDelegateTake::::contains_key(old_hotkey)); - assert_eq!( - LastTxBlockDelegateTake::::get(new_hotkey), - SubtensorModule::get_current_block_as_u64() - ); - }); -} - // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test 
swap_hotkey -- test_swap_senate_members --exact --nocapture #[test] fn test_swap_senate_members() { @@ -947,8 +897,11 @@ fn test_swap_stake_success() { // Initialize staking variables for old_hotkey TotalHotkeyAlpha::::insert(old_hotkey, netuid, amount); + TotalHotkeyAlphaLastEpoch::::insert(old_hotkey, netuid, amount * 2); TotalHotkeyShares::::insert(old_hotkey, netuid, U64F64::from_num(shares)); Alpha::::insert((old_hotkey, coldkey, netuid), U64F64::from_num(amount)); + AlphaDividendsPerSubnet::::insert(netuid, old_hotkey, amount); + TaoDividendsPerSubnet::::insert(netuid, old_hotkey, amount); // Perform the swap SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); @@ -956,6 +909,14 @@ fn test_swap_stake_success() { // Verify the swap assert_eq!(TotalHotkeyAlpha::::get(old_hotkey, netuid), 0); assert_eq!(TotalHotkeyAlpha::::get(new_hotkey, netuid), amount); + assert_eq!( + TotalHotkeyAlphaLastEpoch::::get(old_hotkey, netuid), + 0 + ); + assert_eq!( + TotalHotkeyAlphaLastEpoch::::get(new_hotkey, netuid), + amount * 2 + ); assert_eq!( TotalHotkeyShares::::get(old_hotkey, netuid), U64F64::from_num(0) @@ -972,6 +933,16 @@ fn test_swap_stake_success() { Alpha::::get((new_hotkey, coldkey, netuid)), U64F64::from_num(amount) ); + assert_eq!(AlphaDividendsPerSubnet::::get(netuid, old_hotkey), 0); + assert_eq!( + AlphaDividendsPerSubnet::::get(netuid, new_hotkey), + amount + ); + assert_eq!(TaoDividendsPerSubnet::::get(netuid, old_hotkey), 0); + assert_eq!( + TaoDividendsPerSubnet::::get(netuid, new_hotkey), + amount + ); }); } @@ -1387,3 +1358,39 @@ fn test_swap_hotkey_is_sn_owner_hotkey() { assert_eq!(SubnetOwnerHotkey::::get(netuid), new_hotkey); }); } + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_swap_rate_limits --exact --nocapture +#[test] +fn test_swap_hotkey_swap_rate_limits() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let 
coldkey = U256::from(3); + let mut weight = Weight::zero(); + + let last_tx_block = 123; + let delegate_take_block = 4567; + let child_key_take_block = 8910; + + // Set the last tx block for the old hotkey + LastTxBlock::::insert(old_hotkey, last_tx_block); + // Set the last delegate take block for the old hotkey + LastTxBlockDelegateTake::::insert(old_hotkey, delegate_take_block); + // Set last childkey take block for the old hotkey + LastTxBlockChildKeyTake::::insert(old_hotkey, child_key_take_block); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Check for new hotkey + assert_eq!(LastTxBlock::::get(new_hotkey), last_tx_block); + assert_eq!( + LastTxBlockDelegateTake::::get(new_hotkey), + delegate_take_block + ); + assert_eq!( + LastTxBlockChildKeyTake::::get(new_hotkey), + child_key_take_block + ); + }); +} diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 178613fbb6..92a8a64048 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -68,6 +68,7 @@ fn test_replace_neuron() { Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -138,6 +139,76 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip, 0); assert_eq!(axon_info.port, 0); assert_eq!(axon_info.ip_type, 0); + + // Check bonds are cleared. 
+ assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + }); +} + +#[test] +fn test_bonds_cleared_on_replace() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 1; + let tempo: u16 = 13; + let hotkey_account_id = U256::from(1); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 111111, + &hotkey_account_id, + ); + let coldkey_account_id = U256::from(1234); + + let new_hotkey_account_id = U256::from(2); + let _new_colkey_account_id = U256::from(12345); + + //add network + add_network(netuid, tempo, 0); + + // Register a neuron. + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work, + hotkey_account_id, + coldkey_account_id + )); + + // Get UID + let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id); + assert_ok!(neuron_uid); + let neuron_uid = neuron_uid.unwrap(); + + // set non-default bonds + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + + // Replace the neuron. + SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); + + // Check old hotkey is not registered on any network. + assert!(SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id).is_err()); + assert!(!SubtensorModule::is_hotkey_registered_on_any_network( + &hotkey_account_id + )); + + let curr_hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, neuron_uid); + assert_ok!(curr_hotkey); + assert_ne!(curr_hotkey.unwrap(), hotkey_account_id); + + // Check new hotkey is registered on the network. + assert!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &new_hotkey_account_id).is_ok() + ); + assert!(SubtensorModule::is_hotkey_registered_on_any_network( + &new_hotkey_account_id + )); + assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); + + // Check bonds are cleared. 
+ assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); }); } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 5fcbbcb698..14b80a0310 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -3110,7 +3110,7 @@ fn test_reveal_at_exact_block() { let current_block = SubtensorModule::get_current_block_as_u64(); if current_block < reveal_epoch_start_block { // Advance to one block before the reveal epoch starts - let blocks_to_advance = reveal_epoch_start_block.saturating_sub(current_block); + let blocks_to_advance = reveal_epoch_start_block - current_block; if blocks_to_advance > 1 { // Advance to one block before the reveal epoch let new_block_number = current_block + blocks_to_advance - 1; @@ -3181,9 +3181,7 @@ fn test_reveal_at_exact_block() { let commit_epoch = SubtensorModule::get_epoch_index(netuid, commit_block); let reveal_epoch = commit_epoch.saturating_add(reveal_period); let expiration_epoch = reveal_epoch.saturating_add(1); - let expiration_epoch_start_block = expiration_epoch - .saturating_mul(tempo_plus_one) - .saturating_sub(netuid_plus_one); + let expiration_epoch_start_block = expiration_epoch * tempo_plus_one - netuid_plus_one; let current_block = SubtensorModule::get_current_block_as_u64(); if current_block < expiration_epoch_start_block { diff --git a/pallets/subtensor/src/utils/evm.rs b/pallets/subtensor/src/utils/evm.rs new file mode 100644 index 0000000000..a34f6afc80 --- /dev/null +++ b/pallets/subtensor/src/utils/evm.rs @@ -0,0 +1,74 @@ +use super::*; + +use frame_support::ensure; +use frame_system::ensure_signed; +use sp_core::{H160, ecdsa::Signature, hashing::keccak_256}; + +impl Pallet { + /// Associate an EVM key with a hotkey. + /// + /// This function accepts a Signature, which is a signed message containing the hotkey concatenated with + /// the hashed block number. 
It will then attempt to recover the EVM key from the signature and compare it + /// with the `evm_key` parameter, and ensures that they match. + /// + /// The EVM key is expected to sign the message according to this formula to produce the signature: + /// ```text + /// keccak_256(hotkey ++ keccak_256(block_number)) + /// ``` + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which should be the coldkey that owns the hotkey. + /// * `netuid` - The unique identifier for the subnet that the hotkey belongs to. + /// * `hotkey` - The hotkey associated with the `origin` coldkey. + /// * `evm_key` - The EVM address to associate with the `hotkey`. + /// * `block_number` - The block number used in the `signature`. + /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed `block_number`. + pub fn do_associate_evm_key( + origin: T::RuntimeOrigin, + netuid: u16, + hotkey: T::AccountId, + evm_key: H160, + block_number: u64, + signature: Signature, + ) -> dispatch::DispatchResult { + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::get_owning_coldkey_for_hotkey(&hotkey) == coldkey, + Error::::NonAssociatedColdKey + ); + + let uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; + + let mut message = [0u8; 64]; + let block_hash = keccak_256(block_number.encode().as_ref()); + message[..32].copy_from_slice(&hotkey.encode()[..]); + message[32..].copy_from_slice(block_hash.as_ref()); + let public = signature + .recover_prehashed(&keccak_256(message.as_ref())) + .ok_or(Error::::UnableToRecoverPublicKey)?; + let secp_pubkey = libsecp256k1::PublicKey::parse_compressed(&public.0) + .map_err(|_| Error::::UnableToRecoverPublicKey)?; + let uncompressed = secp_pubkey.serialize(); + let hashed_evm_key = H160::from_slice(&keccak_256(&uncompressed[1..])[12..]); + + ensure!( + evm_key == hashed_evm_key, + Error::::InvalidRecoveredPublicKey + ); + + let current_block_number = Self::get_current_block_as_u64(); + + 
AssociatedEvmAddress::::insert(netuid, uid, (evm_key, current_block_number)); + + Self::deposit_event(Event::EvmKeyAssociated { + netuid, + hotkey, + evm_key, + block_associated: current_block_number, + }); + + Ok(()) + } +} diff --git a/pallets/subtensor/src/utils/identity.rs b/pallets/subtensor/src/utils/identity.rs index 0e83205cc0..c4b9fdc821 100644 --- a/pallets/subtensor/src/utils/identity.rs +++ b/pallets/subtensor/src/utils/identity.rs @@ -135,7 +135,7 @@ impl Pallet { SubnetIdentitiesV2::::insert(netuid, identity.clone()); // Log the identity set event - log::info!("SubnetIdentitySet( netuid:{:?} ) ", netuid); + log::debug!("SubnetIdentitySet( netuid:{:?} ) ", netuid); // Emit an event to notify that an identity has been set Self::deposit_event(Event::SubnetIdentitySet(netuid)); diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index d0a6ae5205..b375cc66e4 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -7,7 +7,7 @@ use safe_math::*; use sp_core::Get; use sp_core::U256; use sp_runtime::Saturating; -use substrate_fixed::types::{I32F32, I96F32}; +use substrate_fixed::types::{I32F32, U96F32}; impl Pallet { pub fn ensure_subnet_owner_or_root( @@ -475,6 +475,7 @@ impl Pallet { } pub fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool) { CommitRevealWeightsEnabled::::set(netuid, enabled); + Self::deposit_event(Event::CommitRevealEnabled(netuid, enabled)); } pub fn get_rho(netuid: u16) -> u16 { @@ -598,9 +599,9 @@ impl Pallet { pub fn get_subnet_owner_cut() -> u16 { SubnetOwnerCut::::get() } - pub fn get_float_subnet_owner_cut() -> I96F32 { - I96F32::saturating_from_num(SubnetOwnerCut::::get()) - .safe_div(I96F32::saturating_from_num(u16::MAX)) + pub fn get_float_subnet_owner_cut() -> U96F32 { + U96F32::saturating_from_num(SubnetOwnerCut::::get()) + .safe_div(U96F32::saturating_from_num(u16::MAX)) } pub fn set_subnet_owner_cut(subnet_owner_cut: u16) { 
SubnetOwnerCut::::set(subnet_owner_cut); @@ -733,4 +734,12 @@ impl Pallet { SubnetOwnerHotkey::::insert(netuid, hotkey.clone()); Self::deposit_event(Event::SubnetOwnerHotkeySet(netuid, hotkey.clone())); } + + // Get the uid of the Owner Hotkey for a subnet. + pub fn get_owner_uid(netuid: u16) -> Option { + match SubnetOwnerHotkey::::try_get(netuid) { + Ok(owner_hotkey) => Uids::::get(netuid, &owner_hotkey), + Err(_) => None, + } + } } diff --git a/pallets/subtensor/src/utils/mod.rs b/pallets/subtensor/src/utils/mod.rs index 909ad89593..3eb8439959 100644 --- a/pallets/subtensor/src/utils/mod.rs +++ b/pallets/subtensor/src/utils/mod.rs @@ -1,4 +1,5 @@ use super::*; +pub mod evm; pub mod identity; pub mod misc; pub mod rate_limiting; diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 8b30f9665b..c37a78d2e4 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -7,6 +7,7 @@ pub enum TransactionType { SetChildkeyTake, Unknown, RegisterNetwork, + SetWeightsVersionKey, } /// Implement conversion from TransactionType to u16 @@ -17,6 +18,7 @@ impl From for u16 { TransactionType::SetChildkeyTake => 1, TransactionType::Unknown => 2, TransactionType::RegisterNetwork => 3, + TransactionType::SetWeightsVersionKey => 4, } } } @@ -28,6 +30,7 @@ impl From for TransactionType { 0 => TransactionType::SetChildren, 1 => TransactionType::SetChildkeyTake, 3 => TransactionType::RegisterNetwork, + 4 => TransactionType::SetWeightsVersionKey, _ => TransactionType::Unknown, } } @@ -41,14 +44,18 @@ impl Pallet { match tx_type { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), - TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) TransactionType::RegisterNetwork => NetworkRateLimit::::get(), + + TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) + 
_ => 0, } } - pub fn get_rate_limit_on_subnet(tx_type: &TransactionType, _netuid: u16) -> u64 { + pub fn get_rate_limit_on_subnet(tx_type: &TransactionType, netuid: u16) -> u64 { #[allow(clippy::match_single_binding)] match tx_type { + TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) + .saturating_mul(WeightsVersionKeyRateLimit::::get()), _ => Self::get_rate_limit(tx_type), } } diff --git a/pallets/subtensor/src/utils/try_state.rs b/pallets/subtensor/src/utils/try_state.rs index ffa5869110..1fb75fd4bb 100644 --- a/pallets/subtensor/src/utils/try_state.rs +++ b/pallets/subtensor/src/utils/try_state.rs @@ -6,16 +6,11 @@ impl Pallet { /// Checks [`TotalIssuance`] equals the sum of currency issuance, total stake, and total subnet /// locked. pub(crate) fn check_total_issuance() -> Result<(), sp_runtime::TryRuntimeError> { - // Get the total subnet locked amount - let total_subnet_locked = Self::get_total_subnet_locked(); - // Get the total currency issuance let currency_issuance = T::Currency::total_issuance(); // Calculate the expected total issuance - let expected_total_issuance = currency_issuance - .saturating_add(TotalStake::::get()) - .saturating_add(total_subnet_locked); + let expected_total_issuance = currency_issuance.saturating_add(TotalStake::::get()); // Verify the diff between calculated TI and actual TI is less than delta // diff --git a/pallets/swap-interface/Cargo.toml b/pallets/swap-interface/Cargo.toml index 28c05cac3e..e401d7e548 100644 --- a/pallets/swap-interface/Cargo.toml +++ b/pallets/swap-interface/Cargo.toml @@ -1,13 +1,17 @@ [package] -name = "pallet-subtensor-swap-interface" +name = "subtensor-swap-interface" version = "0.1.0" edition.workspace = true [dependencies] +codec = { workspace = true } +frame-support = { workspace = true } +scale-info = { workspace = true } +uuid = { workspace = true, features = ["v4"] } [lints] workspace = true [features] default = ["std"] -std = [] +std = ["codec/std", "frame-support/std", 
"scale-info/std", "uuid/std"] diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index ff79e7f93f..df0aacd0d7 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -1,9 +1,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; - -use alloc::boxed::Box; -use core::error::Error; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::pallet_prelude::*; +use uuid::Uuid; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OrderType { @@ -12,14 +11,70 @@ pub enum OrderType { } pub trait SwapHandler { - fn swap(order_t: OrderType, amount: u64) -> Result<(), Box>; - fn add_liquidity(account_id: AccountId, liquidity: u64) -> Result<(u64, u64), Box>; - fn remove_liquidity(account_id: AccountId) -> Result<(), Box>; + fn swap( + netuid: u16, + order_t: OrderType, + amount: u64, + price_limit: u64, + ) -> Result; + fn add_liquidity( + netuid: u16, + account_id: &AccountId, + tick_low: i32, + tick_high: i32, + liquidity: u64, + ) -> Result<(u64, u64), DispatchError>; + fn remove_liquidity( + netuid: u16, + account_id: &AccountId, + position_id: PositionId, + ) -> Result<(u64, u64), DispatchError>; + fn max_price() -> u64; + fn min_price() -> u64; +} + +#[derive(Debug, PartialEq)] +pub struct SwapResult { + pub amount_paid_out: u64, + pub refund: u64, + // calculated new tao/alpha reserves + pub new_tao_reserve: u64, + pub new_alpha_reserve: u64, } pub trait LiquidityDataProvider { fn tao_reserve(netuid: u16) -> u64; fn alpha_reserve(netuid: u16) -> u64; - fn tao_balance(netuid: u16, account_id: &AccountId) -> u64; + fn tao_balance(account_id: &AccountId) -> u64; fn alpha_balance(netuid: u16, account_id: &AccountId) -> u64; } + +#[derive( + Clone, Copy, Decode, Default, Encode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, +)] +pub struct PositionId([u8; 16]); + +impl PositionId { + /// Create a new position ID using UUID v4 + pub fn new() -> Self { + 
Self(Uuid::new_v4().into_bytes()) + } +} + +impl From for PositionId { + fn from(value: Uuid) -> Self { + Self(value.into_bytes()) + } +} + +impl From for Uuid { + fn from(value: PositionId) -> Self { + Uuid::from_bytes(value.0) + } +} + +impl From<[u8; 16]> for PositionId { + fn from(value: [u8; 16]) -> Self { + Self(value) + } +} diff --git a/pallets/swap/Cargo.toml b/pallets/swap/Cargo.toml index 6b711754fd..b99619ecc4 100644 --- a/pallets/swap/Cargo.toml +++ b/pallets/swap/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } alloy-primitives = { workspace = true } approx = { workspace = true } codec = { workspace = true } +frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } safe-math = { workspace = true } @@ -18,9 +19,8 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } substrate-fixed = { workspace = true } -uuid = { workspace = true, features = ["v4"] } -pallet-subtensor-swap-interface = { workspace = true } +subtensor-swap-interface = { workspace = true } [lints] workspace = true @@ -28,19 +28,24 @@ workspace = true [features] default = ["std"] std = [ - "alloy-primitives/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "pallet-subtensor-swap-interface/std", - "safe-math/std", - "scale-info/std", - "serde/std", - "sp-arithmetic/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "substrate-fixed/std", - "uuid/std", + "alloy-primitives/std", + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "subtensor-swap-interface/std", + "safe-math/std", + "scale-info/std", + "serde/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "substrate-fixed/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] diff --git 
a/pallets/swap/scripts/benchmark.sh b/pallets/swap/scripts/benchmark.sh new file mode 100755 index 0000000000..20feb6b270 --- /dev/null +++ b/pallets/swap/scripts/benchmark.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +cargo build --profile production --features runtime-benchmarks +./target/production/node-subtensor benchmark pallet \ + --chain=local \ + --pallet=pallet_subtensor_swap \ + --extrinsic="*" \ + --steps 50 \ + --repeat 20 \ + --output=pallets/swap/src/weights.rs \ + --template=./.maintain/frame-weight-template.hbs \ No newline at end of file diff --git a/pallets/swap/src/benchmarking.rs b/pallets/swap/src/benchmarking.rs new file mode 100644 index 0000000000..1bf1b8a95d --- /dev/null +++ b/pallets/swap/src/benchmarking.rs @@ -0,0 +1,23 @@ +//! Benchmarking setup for pallet-subtensor-swap +#![cfg(feature = "runtime-benchmarks")] +#![allow(clippy::arithmetic_side_effects)] + +use crate::pallet::{Call, Config, Pallet}; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +#[benchmarks(where T: Config)] +mod benchmarks { + use super::*; // Use imports from outer scope + + #[benchmark] + fn set_fee_rate() { + let netuid: u16 = 1; + let rate: u16 = 100; // Some arbitrary fee rate value + + #[extrinsic_call] + set_fee_rate(RawOrigin::Root, netuid, rate); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/pallets/swap/src/lib.rs b/pallets/swap/src/lib.rs index 2d1ccadb68..e25daae60d 100644 --- a/pallets/swap/src/lib.rs +++ b/pallets/swap/src/lib.rs @@ -2,12 +2,18 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::pallet_prelude::*; -use pallet_subtensor_swap_interface::OrderType; +use subtensor_swap_interface::OrderType; use substrate_fixed::types::U64F64; pub mod pallet; mod position; mod tick; +pub mod weights; + +pub use pallet::*; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; #[cfg(test)] pub(crate) mod mock; diff --git a/pallets/swap/src/mock.rs 
b/pallets/swap/src/mock.rs index 56c0a0ce71..5732152e34 100644 --- a/pallets/swap/src/mock.rs +++ b/pallets/swap/src/mock.rs @@ -4,13 +4,13 @@ use frame_support::{ traits::{ConstU32, Everything}, }; use frame_system::{self as system, EnsureRoot}; -use pallet_subtensor_swap_interface::LiquidityDataProvider; use sp_core::H256; use sp_runtime::{ BuildStorage, traits::{BlakeTwo256, IdentityLookup}, }; use substrate_fixed::types::U64F64; +use subtensor_swap_interface::LiquidityDataProvider; use crate::SqrtPrice; @@ -67,8 +67,6 @@ parameter_types! { pub const MaxFeeRate: u16 = 10000; // 15.26% pub const MaxPositions: u32 = 100; pub const MinimumLiquidity: u64 = 1_000; - pub MinSqrtPrice: SqrtPrice = U64F64::from_num(0.001); - pub MaxSqrtPrice: SqrtPrice = U64F64::from_num(10.0); } // Mock implementor of LiquidityDataProvider trait @@ -83,7 +81,7 @@ impl LiquidityDataProvider for MockLiquidityProvider { 4_000_000_000 } - fn tao_balance(_: u16, account_id: &AccountId) -> u64 { + fn tao_balance(account_id: &AccountId) -> u64 { if *account_id == OK_ACCOUNT_ID { 100_000_000_000_000 } else { @@ -100,7 +98,6 @@ impl LiquidityDataProvider for MockLiquidityProvider { } } -// Implementations of traits don't support visibility qualifiers impl crate::pallet::Config for Test { type RuntimeEvent = RuntimeEvent; type AdminOrigin = EnsureRoot; @@ -109,8 +106,7 @@ impl crate::pallet::Config for Test { type MaxFeeRate = MaxFeeRate; type MaxPositions = MaxPositions; type MinimumLiquidity = MinimumLiquidity; - type MinSqrtPrice = MinSqrtPrice; - type MaxSqrtPrice = MaxSqrtPrice; + type WeightInfo = (); } // Build genesis storage according to the mock runtime. 
diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index f91ddad072..c7ee5e4b1f 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1,16 +1,16 @@ use core::marker::PhantomData; -use frame_support::{ensure, traits::Get}; -use pallet_subtensor_swap_interface::LiquidityDataProvider; +use frame_support::{ensure, pallet_prelude::DispatchError, traits::Get}; use safe_math::*; use sp_arithmetic::helpers_128bit; use sp_runtime::traits::AccountIdConversion; use substrate_fixed::types::U64F64; +use subtensor_swap_interface::{LiquidityDataProvider, PositionId, SwapHandler, SwapResult}; use super::pallet::*; use crate::{ NetUid, OrderType, RemoveLiquidityResult, SqrtPrice, - position::{Position, PositionId}, + position::Position, tick::{ActiveTickIndexManager, Tick, TickIndex}, }; @@ -80,14 +80,14 @@ impl SwapStep { }; let mut lim_quantity = match order_type { OrderType::Buy => one - .safe_div(T::MinSqrtPrice::get()) + .safe_div(TickIndex::min_sqrt_price()) .min(one.safe_div(sqrt_price_limit.into())), - OrderType::Sell => T::MaxSqrtPrice::get().min(sqrt_price_limit.into()), + OrderType::Sell => TickIndex::max_sqrt_price().min(sqrt_price_limit.into()), }; if lim_quantity < one.safe_div(current_price) { lim_quantity = match order_type { - OrderType::Buy => one.safe_div(T::MinSqrtPrice::get()), - OrderType::Sell => T::MaxSqrtPrice::get(), + OrderType::Buy => one.safe_div(TickIndex::min_sqrt_price()), + OrderType::Sell => TickIndex::max_sqrt_price(), }; } @@ -227,7 +227,7 @@ impl SwapStep { let delta_out = Pallet::::convert_deltas(self.netuid, self.order_type, self.delta_in); // TODO (look inside method) - // Self::update_reserves(netuid, order_type, delta_in, delta_out); + // Self::update_reserves(netuid, order_type, self.delta_in, delta_out); // Get current tick let current_tick_index = TickIndex::current_bounded::(self.netuid); @@ -280,6 +280,7 @@ impl SwapStep { Ok(SwapStepResult { amount_to_take: 
amount_swapped.saturating_to_num::(), + delat_in: self.delta_in, delta_out, }) } @@ -345,6 +346,7 @@ impl Pallet { let mut amount_paid_out: u64 = 0; let mut refund: u64 = 0; let mut iteration_counter: u16 = 0; + let mut in_acc: u64 = 0; // Swap one tick at a time until we reach one of the stop conditions while amount_remaining > 0 { @@ -354,6 +356,7 @@ impl Pallet { let swap_result = swap_step.execute()?; + in_acc = in_acc.saturating_add(swap_result.delat_in); amount_remaining = amount_remaining.saturating_sub(swap_result.amount_to_take); amount_paid_out = amount_paid_out.saturating_add(swap_result.delta_out); @@ -368,9 +371,25 @@ impl Pallet { } } + let tao_reserve = T::LiquidityDataProvider::tao_reserve(netuid.into()); + let alpha_reserve = T::LiquidityDataProvider::alpha_reserve(netuid.into()); + + let (new_tao_reserve, new_alpha_reserve) = match order_type { + OrderType::Buy => ( + tao_reserve.saturating_add(in_acc), + alpha_reserve.saturating_sub(amount_paid_out), + ), + OrderType::Sell => ( + tao_reserve.saturating_sub(amount_paid_out), + alpha_reserve.saturating_add(in_acc), + ), + }; + Ok(SwapResult { amount_paid_out, refund, + new_tao_reserve, + new_alpha_reserve, }) } @@ -404,7 +423,7 @@ impl Pallet { (match order_type { OrderType::Buy => { let higher_tick = - Pallet::::find_closest_higher_active_tick_index(netuid, current_price_tick) + ActiveTickIndexManager::find_closest_higher::(netuid, current_price_tick) .unwrap_or(TickIndex::MAX); if higher_tick < TickIndex::MAX { higher_tick.saturating_add(1) @@ -413,7 +432,7 @@ impl Pallet { } } OrderType::Sell => { - Pallet::::find_closest_lower_active_tick_index(netuid, current_price_tick) + ActiveTickIndexManager::find_closest_lower::(netuid, current_price_tick) .unwrap_or(TickIndex::MIN) } }) @@ -614,20 +633,6 @@ impl Pallet { .and_then(|ti| Ticks::::get(netuid, ti)) } - pub fn find_closest_lower_active_tick_index( - netuid: NetUid, - index: TickIndex, - ) -> Option { - 
ActiveTickIndexManager::find_closest_lower::(netuid, index) - } - - pub fn find_closest_higher_active_tick_index( - netuid: NetUid, - index: TickIndex, - ) -> Option { - ActiveTickIndexManager::find_closest_higher::(netuid, index) - } - /// Here we subtract minimum safe liquidity from current liquidity to stay in the safe range fn current_liquidity_safe(netuid: NetUid) -> U64F64 { U64F64::saturating_from_num( @@ -675,7 +680,7 @@ impl Pallet { let position_id = position.id; ensure!( - T::LiquidityDataProvider::tao_balance(netuid.into(), account_id) >= tao + T::LiquidityDataProvider::tao_balance(account_id) >= tao && T::LiquidityDataProvider::alpha_balance(netuid.into(), account_id) >= alpha, Error::::InsufficientBalance ); @@ -729,7 +734,7 @@ impl Pallet { // If this is a user transaction, withdraw balances and update reserves // TODO this should be returned (tao, alpha) from this function to prevent // mutation of outside storage - the logic should be passed to the user of - // pallet_subtensor_swap_interface + // subtensor_swap_interface // if !protocol { // let current_price = self.state_ops.get_alpha_sqrt_price(); // let (tao, alpha) = position.to_token_amounts(current_price)?; @@ -915,15 +920,62 @@ impl Pallet { } } -#[derive(Debug, PartialEq)] -pub struct SwapResult { - amount_paid_out: u64, - refund: u64, +impl SwapHandler for Pallet { + fn swap( + netuid: u16, + order_t: OrderType, + amount: u64, + price_limit: u64, + ) -> Result { + let sqrt_price_limit = SqrtPrice::saturating_from_num(price_limit) + .checked_sqrt(SqrtPrice::saturating_from_num(2)) + .ok_or(Error::::PriceLimitExceeded)?; + + Self::swap(NetUid::from(netuid), order_t, amount, sqrt_price_limit).map_err(Into::into) + } + + fn add_liquidity( + netuid: u16, + account_id: &T::AccountId, + tick_low: i32, + tick_high: i32, + liquidity: u64, + ) -> Result<(u64, u64), DispatchError> { + let tick_low = TickIndex::new(tick_low).map_err(|_| Error::::InvalidTickRange)?; + let tick_high = 
TickIndex::new(tick_high).map_err(|_| Error::::InvalidTickRange)?; + + Self::add_liquidity(netuid.into(), account_id, tick_low, tick_high, liquidity) + .map(|(_, tao, alpha)| (tao, alpha)) + .map_err(Into::into) + } + + fn remove_liquidity( + netuid: u16, + account_id: &T::AccountId, + position_id: PositionId, + ) -> Result<(u64, u64), DispatchError> { + Self::remove_liquidity(netuid.into(), account_id, position_id) + .map(|result| (result.tao, result.alpha)) + .map_err(Into::into) + } + + fn min_price() -> u64 { + TickIndex::min_sqrt_price() + .saturating_mul(TickIndex::min_sqrt_price()) + .saturating_to_num() + } + + fn max_price() -> u64 { + TickIndex::max_sqrt_price() + .saturating_mul(TickIndex::max_sqrt_price()) + .saturating_to_num() + } } #[derive(Debug, PartialEq)] struct SwapStepResult { amount_to_take: u64, + delat_in: u64, delta_out: u64, } @@ -1382,13 +1434,36 @@ mod tests { // Swap let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); let swap_result = - Pallet::::swap(netuid, order_type, liquidity, sqrt_limit_price); + Pallet::::swap(netuid, order_type, liquidity, sqrt_limit_price) + .unwrap(); assert_abs_diff_eq!( - swap_result.unwrap().amount_paid_out, + swap_result.amount_paid_out, output_amount, epsilon = output_amount / 100 ); + let (tao_expected, alpha_expected) = match order_type { + OrderType::Buy => ( + MockLiquidityProvider::tao_reserve(netuid.into()) + liquidity, + MockLiquidityProvider::alpha_reserve(netuid.into()) - output_amount, + ), + OrderType::Sell => ( + MockLiquidityProvider::tao_reserve(netuid.into()) + output_amount, + MockLiquidityProvider::alpha_reserve(netuid.into()) - liquidity, + ), + }; + + assert_abs_diff_eq!( + swap_result.new_alpha_reserve, + alpha_expected, + epsilon = alpha_expected / 100 + ); + assert_abs_diff_eq!( + swap_result.new_tao_reserve, + tao_expected, + epsilon = tao_expected / 100 + ); + // Check that low and high ticks' fees were updated properly, and liquidity values were not updated let 
tick_low_info = Ticks::::get(netuid, tick_low).unwrap(); let tick_high_info = Ticks::::get(netuid, tick_high).unwrap(); @@ -1449,17 +1524,6 @@ mod tests { OrderType::Buy => assert!(current_price_after > current_price), OrderType::Sell => assert!(current_price_after < current_price), } - - // Reserves are updated - // TODO: Add the test - // assert_eq!( - // swap.state_ops.get_tao_reserve(), - // tao_withdrawn + protocol_tao, - // ); - // assert_eq!( - // swap.state_ops.get_alpha_reserve(), - // alpha_withdrawn + protocol_alpha, - // ); }, ); }); @@ -1502,7 +1566,7 @@ mod tests { (-0.01, 0.01, 100_000_000_000_u64), (-0.001, 0.001, 100_000_000_000_u64), ] - .iter() + .into_iter() .for_each( |(price_low_offset, price_high_offset, position_liquidity)| { // Inner part of test case is Order: (order_type, order_liquidity, limit_price, output_amount) @@ -1525,7 +1589,7 @@ mod tests { (OrderType::Buy, 1.0, 1000.0), (OrderType::Sell, 1.0, 0.0001), ] - .iter() + .into_iter() .for_each(|(order_type, order_liquidity_fraction, limit_price)| { new_test_ext().execute_with(|| { ////////////////////////////////////////////// @@ -1540,8 +1604,8 @@ mod tests { let current_price = (sqrt_current_price * sqrt_current_price).to_num::(); - let price_low = *price_low_offset + current_price; - let price_high = *price_high_offset + current_price; + let price_low = price_low_offset + current_price; + let price_high = price_high_offset + current_price; let tick_low = price_to_tick(price_low); let tick_high = price_to_tick(price_high); let (_position_id, _tao, _alpha) = Pallet::::add_liquidity( @@ -1549,7 +1613,7 @@ mod tests { &OK_ACCOUNT_ID, tick_low, tick_high, - *position_liquidity, + position_liquidity, ) .unwrap(); @@ -1564,7 +1628,7 @@ mod tests { let liquidity_before = CurrentLiquidity::::get(netuid); assert_abs_diff_eq!( liquidity_before as f64, - protocol_liquidity + *position_liquidity as f64, + protocol_liquidity + position_liquidity as f64, epsilon = liquidity_before as f64 / 10000. 
); @@ -1572,8 +1636,7 @@ mod tests { // Swap // Calculate the expected output amount for the cornercase of one step - let order_liquidity = - *order_liquidity_fraction * *position_liquidity as f64; + let order_liquidity = order_liquidity_fraction * position_liquidity as f64; let input_amount = match order_type { OrderType::Buy => order_liquidity * sqrt_current_price.to_num::(), @@ -1603,16 +1666,45 @@ mod tests { let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); let swap_result = Pallet::::swap( netuid, - *order_type, + order_type, order_liquidity as u64, sqrt_limit_price, - ); + ) + .unwrap(); assert_abs_diff_eq!( - swap_result.unwrap().amount_paid_out as f64, + swap_result.amount_paid_out as f64, output_amount, epsilon = output_amount / 10. ); + if (order_liquidity_fraction <= 0.001) + && (price_low_offset != 0.0) + && (price_high_offset != 0.0) + { + let tao_reserve_f64 = tao_reserve as f64; + let alpha_reserve_f64 = alpha_reserve as f64; + let (tao_expected, alpha_expected) = match order_type { + OrderType::Buy => ( + tao_reserve_f64 + order_liquidity, + alpha_reserve_f64 - output_amount, + ), + OrderType::Sell => ( + tao_reserve_f64 - output_amount, + alpha_reserve_f64 + order_liquidity, + ), + }; + assert_abs_diff_eq!( + swap_result.new_alpha_reserve as f64, + alpha_expected, + epsilon = alpha_expected / 10.0 + ); + assert_abs_diff_eq!( + swap_result.new_tao_reserve as f64, + tao_expected, + epsilon = tao_expected / 10.0 + ); + } + // Assert that price movement is in correct direction let sqrt_current_price_after = AlphaSqrtPrice::::get(netuid); let current_price_after = @@ -1623,9 +1715,9 @@ mod tests { } // Assert that for small amounts price stays within the user position - if (*order_liquidity_fraction <= 0.001) - && (*price_low_offset != 0.0) - && (*price_high_offset != 0.0) + if (order_liquidity_fraction <= 0.001) + && (price_low_offset != 0.0) + && (price_high_offset != 0.0) { assert!(current_price_after <= price_high); 
assert!(current_price_after >= price_low); @@ -1672,7 +1764,7 @@ mod tests { .collect::>(); let position = positions.first().unwrap(); - assert_eq!(position.liquidity, *position_liquidity,); + assert_eq!(position.liquidity, position_liquidity,); assert_eq!(position.tick_low, tick_low); assert_eq!(position.tick_high, tick_high); assert_eq!(position.fees_alpha, 0); @@ -1680,17 +1772,6 @@ mod tests { // Current liquidity is not updated assert_eq!(CurrentLiquidity::::get(netuid), liquidity_before); - - // Reserves are updated - // TODO: Add the test - // assert_eq!( - // swap.state_ops.get_tao_reserve(), - // tao_withdrawn + protocol_tao, - // ); - // assert_eq!( - // swap.state_ops.get_alpha_reserve(), - // alpha_withdrawn + protocol_alpha, - // ); }); }); }, @@ -1751,11 +1832,11 @@ mod tests { (-0.04, -0.03, 100_000_000_000_u64), (-0.05, -0.03, 100_000_000_000_u64), ] - .iter() + .into_iter() .for_each( |(price_low_offset, price_high_offset, position_liquidity)| { - let price_low = *price_low_offset + current_price; - let price_high = *price_high_offset + current_price; + let price_low = price_low_offset + current_price; + let price_high = price_high_offset + current_price; let tick_low = price_to_tick(price_low); let tick_high = price_to_tick(price_high); let (_position_id, _tao, _alpha) = Pallet::::add_liquidity( @@ -1763,7 +1844,7 @@ mod tests { &OK_ACCOUNT_ID, tick_low, tick_high, - *position_liquidity, + position_liquidity, ) .unwrap(); }, @@ -1788,7 +1869,7 @@ mod tests { (OrderType::Buy, 10_000_000_000, 1000.0), (OrderType::Sell, 10_000_000_000, 0.0001), ] - .iter() + .into_iter() .for_each(|(order_type, order_liquidity, limit_price)| { ////////////////////////////////////////////// // Swap @@ -1800,29 +1881,57 @@ mod tests { OrderType::Buy => { let denom = sqrt_current_price.to_num::() * (sqrt_current_price.to_num::() * liquidity_before as f64 - + *order_liquidity as f64); + + order_liquidity as f64); let per_order_liq = liquidity_before as f64 / denom; - 
per_order_liq * *order_liquidity as f64 + per_order_liq * order_liquidity as f64 } OrderType::Sell => { let denom = liquidity_before as f64 / sqrt_current_price.to_num::() - + *order_liquidity as f64; + + order_liquidity as f64; let per_order_liq = sqrt_current_price.to_num::() * liquidity_before as f64 / denom; - per_order_liq * *order_liquidity as f64 + per_order_liq * order_liquidity as f64 } }; // Do the swap let sqrt_limit_price = SqrtPrice::from_num((limit_price).sqrt()); let swap_result = - Pallet::::swap(netuid, *order_type, *order_liquidity, sqrt_limit_price); + Pallet::::swap(netuid, order_type, order_liquidity, sqrt_limit_price) + .unwrap(); assert_abs_diff_eq!( - swap_result.unwrap().amount_paid_out as f64, + swap_result.amount_paid_out as f64, output_amount, epsilon = output_amount / 10. ); + let tao_reserve = MockLiquidityProvider::tao_reserve(netuid.into()); + let alpha_reserve = MockLiquidityProvider::alpha_reserve(netuid.into()); + let output_amount = output_amount as u64; + + assert!(output_amount > 0); + + if alpha_reserve > order_liquidity && tao_reserve > order_liquidity { + let (tao_expected, alpha_expected) = match order_type { + OrderType::Buy => { + (tao_reserve + order_liquidity, alpha_reserve - output_amount) + } + OrderType::Sell => { + (tao_reserve - output_amount, alpha_reserve + order_liquidity) + } + }; + assert_abs_diff_eq!( + swap_result.new_alpha_reserve, + alpha_expected, + epsilon = alpha_expected / 100 + ); + assert_abs_diff_eq!( + swap_result.new_tao_reserve, + tao_expected, + epsilon = tao_expected / 100 + ); + } + // Assert that price movement is in correct direction let sqrt_current_price_after = AlphaSqrtPrice::::get(netuid); let current_price_after = @@ -1834,17 +1943,6 @@ mod tests { // Current liquidity is not updated assert_eq!(CurrentLiquidity::::get(netuid), liquidity_before); - - // Reserves are updated - // TODO: Add the test - // assert_eq!( - // swap.state_ops.get_tao_reserve(), - // tao_withdrawn + protocol_tao, 
- // ); - // assert_eq!( - // swap.state_ops.get_alpha_reserve(), - // alpha_withdrawn + protocol_alpha, - // ); }); // Current price shouldn't be much different from the original diff --git a/pallets/swap/src/pallet/mod.rs b/pallets/swap/src/pallet/mod.rs index d4cbcbefc6..4a259dc794 100644 --- a/pallets/swap/src/pallet/mod.rs +++ b/pallets/swap/src/pallet/mod.rs @@ -1,12 +1,13 @@ use frame_support::{PalletId, pallet_prelude::*, traits::Get}; use frame_system::pallet_prelude::*; -use pallet_subtensor_swap_interface::LiquidityDataProvider; use substrate_fixed::types::U64F64; +use subtensor_swap_interface::{LiquidityDataProvider, PositionId}; use crate::{ NetUid, SqrtPrice, - position::{Position, PositionId}, + position::Position, tick::{LayerLevel, Tick, TickIndex}, + weights::WeightInfo, }; pub use pallet::*; @@ -31,7 +32,7 @@ mod pallet { type AdminOrigin: EnsureOrigin; /// Implementor of - /// [`LiquidityDataProvider`](pallet_subtensor_swap_interface::LiquidityDataProvider). + /// [`LiquidityDataProvider`](subtensor_swap_interface::LiquidityDataProvider). type LiquidityDataProvider: LiquidityDataProvider; /// This type is used to derive protocol accoun ID. @@ -50,13 +51,8 @@ mod pallet { #[pallet::constant] type MinimumLiquidity: Get; - /// Minimum sqrt price across all active ticks - #[pallet::constant] - type MinSqrtPrice: Get; - - /// Maximum sqrt price across all active ticks - #[pallet::constant] - type MaxSqrtPrice: Get; + /// Weight information for extrinsics in this pallet. 
+ type WeightInfo: WeightInfo; + } /// Default fee rate if not set @@ -165,7 +161,7 @@ mod pallet { /// /// Only callable by the admin origin #[pallet::call_index(0)] - #[pallet::weight(10_000)] + #[pallet::weight(T::WeightInfo::set_fee_rate())] pub fn set_fee_rate(origin: OriginFor, netuid: u16, rate: u16) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; diff --git a/pallets/swap/src/position.rs b/pallets/swap/src/position.rs index 2d4e2c77ed..26c1d4d981 100644 --- a/pallets/swap/src/position.rs +++ b/pallets/swap/src/position.rs @@ -2,7 +2,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::pallet_prelude::*; use safe_math::*; use substrate_fixed::types::U64F64; -use uuid::Uuid; +use subtensor_swap_interface::PositionId; use crate::pallet::{Config, Error, FeeGlobalAlpha, FeeGlobalTao}; use crate::tick::TickIndex; @@ -122,33 +122,3 @@ impl Position { .saturating_to_num::() } } - -#[derive( - Clone, Copy, Decode, Default, Encode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, -)] -pub struct PositionId([u8; 16]); - -impl PositionId { - /// Create a new position ID using UUID v4 - pub fn new() -> Self { - Self(Uuid::new_v4().into_bytes()) - } -} - -impl From for PositionId { - fn from(value: Uuid) -> Self { - Self(value.into_bytes()) - } -} - -impl From for Uuid { - fn from(value: PositionId) -> Self { - Uuid::from_bytes(value.0) - } -} - -impl From<[u8; 16]> for PositionId { - fn from(value: [u8; 16]) -> Self { - Self(value) - } -} diff --git a/pallets/swap/src/tick.rs b/pallets/swap/src/tick.rs index 0476fafd29..6e6d4ab5bf 100644 --- a/pallets/swap/src/tick.rs +++ b/pallets/swap/src/tick.rs @@ -184,6 +184,16 @@ impl TickIndex { /// so that tick indexes are positive, which simplifies bit logic const OFFSET: Self = Self(MAX_TICK); + /// The MIN sqrt price, which is calculated at Self::MIN + pub fn min_sqrt_price() -> SqrtPrice { + SqrtPrice::saturating_from_num(0.0000000002328350195) + } + + /// The MAX sqrt price, which is
calculated at Self::MAX + pub fn max_sqrt_price() -> SqrtPrice { + SqrtPrice::saturating_from_num(4294886577.20989222513899790805) + } + /// Get fees above a tick pub fn fees_above(&self, netuid: NetUid, quote: bool) -> U64F64 { let current_tick = Self::current_bounded::(netuid); @@ -251,9 +261,7 @@ impl TickIndex { match Self::try_from_sqrt_price(sqrt_price) { Ok(index) => index, Err(_) => { - let max_price = Self::MAX - .try_to_sqrt_price() - .unwrap_or(SqrtPrice::saturating_from_num(1000)); + let max_price = Self::MAX.to_sqrt_price_bounded(); if sqrt_price > max_price { Self::MAX @@ -274,9 +282,9 @@ impl TickIndex { pub fn to_sqrt_price_bounded(&self) -> SqrtPrice { self.try_to_sqrt_price().unwrap_or_else(|_| { if *self >= Self::MAX { - SqrtPrice::saturating_from_num(1000) + Self::max_sqrt_price() } else { - SqrtPrice::saturating_from_num(0.000001) + Self::min_sqrt_price() } }) } @@ -687,7 +695,7 @@ impl BitmapLayer { /// A bitmap representation of a tick index position across the three-layer structure #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct TickIndexBitmap { +pub(crate) struct TickIndexBitmap { /// The position in layer 0 (top layer) layer0: BitmapLayer, /// The position in layer 1 (middle layer) @@ -710,12 +718,12 @@ impl TickIndexBitmap { /// Converts a position (word, bit) within a layer to a word index in the next layer down /// Note: This returns a bitmap navigation index, NOT a tick index - pub fn layer_to_index(layer: BitmapLayer) -> u32 { + pub(crate) fn layer_to_index(layer: BitmapLayer) -> u32 { layer.word.saturating_mul(128).saturating_add(layer.bit) } /// Get the mask for a bit in the specified layer - pub fn bit_mask(&self, layer: LayerLevel) -> u128 { + pub(crate) fn bit_mask(&self, layer: LayerLevel) -> u128 { match layer { LayerLevel::Top => 1u128 << self.layer0.bit, LayerLevel::Middle => 1u128 << self.layer1.bit, @@ -724,7 +732,7 @@ impl TickIndexBitmap { } /// Get the word for the specified layer - pub fn word_at(&self, layer: 
LayerLevel) -> u32 { + pub(crate) fn word_at(&self, layer: LayerLevel) -> u32 { match layer { LayerLevel::Top => self.layer0.word, LayerLevel::Middle => self.layer1.word, @@ -733,7 +741,7 @@ impl TickIndexBitmap { } /// Get the bit for the specified layer - pub fn bit_at(&self, layer: LayerLevel) -> u32 { + pub(crate) fn bit_at(&self, layer: LayerLevel) -> u32 { match layer { LayerLevel::Top => self.layer0.bit, LayerLevel::Middle => self.layer1.bit, @@ -754,7 +762,11 @@ impl TickIndexBitmap { /// * Exact match: Vec with [next_bit, bit] /// * Non-exact match: Vec with [closest_bit] /// * No match: Empty Vec - pub fn find_closest_active_bit_candidates(word: u128, bit: u32, lower: bool) -> Vec { + pub(crate) fn find_closest_active_bit_candidates( + word: u128, + bit: u32, + lower: bool, + ) -> Vec { let mut result = vec![]; let mut mask: u128 = 1_u128.wrapping_shl(bit); let mut active_bit: u32 = bit; @@ -1295,6 +1307,18 @@ mod tests { Err(TickMathError::TickOutOfBounds), ); + assert!( + TickIndex::MAX.try_to_sqrt_price().unwrap().abs_diff( + TickIndex::new_unchecked(TickIndex::MAX.get() + 1).to_sqrt_price_bounded() + ) < SqrtPrice::from_num(1e-6) + ); + + assert!( + TickIndex::MIN.try_to_sqrt_price().unwrap().abs_diff( + TickIndex::new_unchecked(TickIndex::MIN.get() - 1).to_sqrt_price_bounded() + ) < SqrtPrice::from_num(1e-6) + ); + // At tick index 0, the sqrt price should be 1.0 let sqrt_price = TickIndex(0).try_to_sqrt_price().unwrap(); assert_eq!(sqrt_price, SqrtPrice::from_num(1.0)); diff --git a/pallets/swap/src/weights.rs b/pallets/swap/src/weights.rs new file mode 100644 index 0000000000..19fc4e8722 --- /dev/null +++ b/pallets/swap/src/weights.rs @@ -0,0 +1,38 @@ +//! Weights for pallet_subtensor_swap +//! +//! This is a default weight implementation with conservative estimates +//! until actual benchmarks are run. 
+ +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{ + traits::Get, + weights::{Weight, constants::RocksDbWeight}, +}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_subtensor_swap. +pub trait WeightInfo { + fn set_fee_rate() -> Weight; +} + +/// Default weights for pallet_subtensor_swap. +pub struct DefaultWeight(PhantomData); +impl WeightInfo for DefaultWeight { + fn set_fee_rate() -> Weight { + // Conservative weight estimate: one read and one write + Weight::from_parts(10_000_000, 0) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn set_fee_rate() -> Weight { + Weight::from_parts(10_000_000, 0) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } +} \ No newline at end of file diff --git a/pallets/utility/Cargo.toml b/pallets/utility/Cargo.toml new file mode 100644 index 0000000000..6d217ebd4b --- /dev/null +++ b/pallets/utility/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-utility" +version = "38.0.0" +edition = "2021" +license = "Apache-2.0" +description = "FRAME utilities pallet" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true, default-features = false, optional = true } +frame-support = { workspace = true, default-features = false } +frame-system = { workspace = true, default-features = false } +sp-core = { workspace = true, default-features = false } +sp-io = { workspace = true, default-features = false} +sp-runtime = { workspace = true, default-features = false} +subtensor-macros = { workspace = true } + +[dev-dependencies] +pallet-balances = { default-features = true, workspace = true } 
+pallet-collective = { default-features = false, path = "../collective" } +pallet-timestamp = { default-features = true, workspace = true } +sp-core = { default-features = true, workspace = true } +pallet-root-testing = { workspace = true, default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "pallet-collective/std", + "pallet-root-testing/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", + "pallet-balances/try-runtime", + "pallet-collective/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-timestamp/try-runtime" +] diff --git a/pallets/utility/README.md b/pallets/utility/README.md new file mode 100644 index 0000000000..5366951a89 --- /dev/null +++ b/pallets/utility/README.md @@ -0,0 +1,43 @@ +# Utility Module +A stateless module with helpers for dispatch management which does no re-authentication. + +- [`utility::Config`](https://docs.rs/pallet-utility/latest/pallet_utility/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-utility/latest/pallet_utility/pallet/enum.Call.html) + +## Overview + +This module contains two basic pieces of functionality: +- Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a + single dispatch. This can be useful to amalgamate proposals, combining `set_code` with + corresponding `set_storage`s, for efficient multiple payouts with just a single signature + verify, or in combination with one of the other two dispatch functionality. 
+- Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from + an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative + account IDs) and these can be stacked. This can be useful as a key management tool, where you + need multiple distinct accounts (e.g. as controllers for many staking accounts), but where + it's perfectly fine to have each of them controlled by the same underlying keypair. + Derivative accounts are, for the purposes of proxy filtering considered exactly the same as + the origin and are thus hampered with the origin's filters. + +Since proxy filters are respected in all dispatches of this module, it should never need to be +filtered by any proxy. + +## Interface + +### Dispatchable Functions + +#### For batch dispatch +- `batch` - Dispatch multiple calls from the sender's origin. + +#### For pseudonymal dispatch +- `as_derivative` - Dispatch a call from a derivative signed origin. + +[`Call`]: ./enum.Call.html +[`Config`]: ./trait.Config.html + +License: Apache-2.0 + + +## Release + +Polkadot SDK stable2409 diff --git a/pallets/utility/src/benchmarking.rs b/pallets/utility/src/benchmarking.rs new file mode 100644 index 0000000000..6980552c36 --- /dev/null +++ b/pallets/utility/src/benchmarking.rs @@ -0,0 +1,91 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Benchmarks for Utility Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use alloc::{vec, vec::Vec}; +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use frame_system::RawOrigin; + +const SEED: u32 = 0; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +benchmarks! { + where_clause { where ::PalletsOrigin: Clone } + batch { + let c in 0 .. 1000; + let mut calls: Vec<::RuntimeCall> = Vec::new(); + for i in 0 .. c { + let call = frame_system::Call::remark { remark: vec![] }.into(); + calls.push(call); + } + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), calls) + verify { + assert_last_event::(Event::BatchCompleted.into()) + } + + as_derivative { + let caller = account("caller", SEED, SEED); + let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), SEED as u16, call) + + batch_all { + let c in 0 .. 1000; + let mut calls: Vec<::RuntimeCall> = Vec::new(); + for i in 0 .. 
c { + let call = frame_system::Call::remark { remark: vec![] }.into(); + calls.push(call); + } + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), calls) + verify { + assert_last_event::(Event::BatchCompleted.into()) + } + + dispatch_as { + let caller = account("caller", SEED, SEED); + let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); + let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into(); + let pallets_origin: ::PalletsOrigin = origin.caller().clone(); + let pallets_origin = Into::::into(pallets_origin); + }: _(RawOrigin::Root, Box::new(pallets_origin), call) + + force_batch { + let c in 0 .. 1000; + let mut calls: Vec<::RuntimeCall> = Vec::new(); + for i in 0 .. c { + let call = frame_system::Call::remark { remark: vec![] }.into(); + calls.push(call); + } + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), calls) + verify { + assert_last_event::(Event::BatchCompleted.into()) + } + + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs new file mode 100644 index 0000000000..2677f744b6 --- /dev/null +++ b/pallets/utility/src/lib.rs @@ -0,0 +1,521 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Utility Pallet +//! 
A stateless pallet with helpers for dispatch management which does no re-authentication. +//! +//! - [`Config`] +//! - [`Call`] +//! +//! ## Overview +//! +//! This pallet contains two basic pieces of functionality: +//! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a +//! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with +//! corresponding `set_storage`s, for efficient multiple payouts with just a single signature +//! verify, or in combination with one of the other two dispatch functionality. +//! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from +//! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative +//! account IDs) and these can be stacked. This can be useful as a key management tool, where you +//! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where +//! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative +//! accounts are, for the purposes of proxy filtering considered exactly the same as the origin +//! and are thus hampered with the origin's filters. +//! +//! Since proxy filters are respected in all dispatches of this pallet, it should never need to be +//! filtered by any proxy. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! #### For batch dispatch +//! * `batch` - Dispatch multiple calls from the sender's origin. +//! +//! #### For pseudonymal dispatch +//! * `as_derivative` - Dispatch a call from a derivative signed origin. + +// Ensure we're `no_std` when compiling for Wasm. 
+#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod tests; +pub mod weights; + +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, + traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, +}; +use sp_core::TypeId; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; +pub use weights::WeightInfo; + +use subtensor_macros::freeze_struct; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{dispatch::DispatchClass, pallet_prelude::*}; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From + IsType<::RuntimeEvent>; + + /// The overarching call type. + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + UnfilteredDispatchable + + IsSubType> + + IsType<::RuntimeCall>; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin: Parameter + + Into<::RuntimeOrigin> + + IsType<<::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as + /// well as the error. + BatchInterrupted { index: u32, error: DispatchError }, + /// Batch of dispatches completed fully with no error. + BatchCompleted, + /// Batch of dispatches completed but has errors. + BatchCompletedWithErrors, + /// A single item within a Batch of dispatches has completed with no error. 
+ ItemCompleted, + /// A single item within a Batch of dispatches has completed with error. + ItemFailed { error: DispatchError }, + /// A call was dispatched. + DispatchedAs { result: DispatchResult }, + } + + // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm + // the `size_of` of the `Call` can be different. To ensure that this doesn't lead to + // mismatches between native/wasm or to different metadata for the same runtime, we + // align the call size. The value is chosen big enough to hopefully never reach it. + const CALL_ALIGN: u32 = 1024; + + #[pallet::extra_constants] + impl Pallet { + /// The limit on the number of batched calls. + fn batched_calls_limit() -> u32 { + let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; + let size = core::mem::size_of::<::RuntimeCall>() as u32; + + let align_up = size.saturating_add(CALL_ALIGN.saturating_sub(1)); + let call_size = align_up + .checked_div(CALL_ALIGN) + .unwrap_or(0) + .saturating_mul(CALL_ALIGN); + + let margin_factor: u32 = 3; + + let after_margin = allocator_limit.checked_div(margin_factor).unwrap_or(0); + + after_margin.checked_div(call_size).unwrap_or(0) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + // If you hit this error, you need to try to `Box` big dispatchable parameters. + assert!( + core::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, + "Call enum size should be smaller than {} bytes.", + CALL_ALIGN, + ); + } + } + + #[pallet::error] + pub enum Error { + /// Too many calls batched. + TooManyCalls, + } + + #[pallet::call] + impl Pallet { + /// Send a batch of dispatch calls. + /// + /// May be called from any origin except `None`. + /// + /// - `calls`: The calls to be dispatched from the same origin. The number of call must not + /// exceed the constant: `batched_calls_limit` (available in constant metadata). + /// + /// If origin is root then the calls are dispatched without checking origin filter.
(This + /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// + /// ## Complexity + /// - O(C) where C is the number of calls to be batched. + /// + /// This will return `Ok` in all circumstances. To determine the success of the batch, an + /// event is deposited. If a call failed and the batch was interrupted, then the + /// `BatchInterrupted` event is deposited, along with the number of successful calls made + /// and the error of the failed call. If all were successful, then the `BatchCompleted` + /// event is deposited. + #[pallet::call_index(0)] + #[pallet::weight({ + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32)); + (dispatch_weight, dispatch_class) + })] + pub fn batch( + origin: OriginFor, + calls: Vec<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()); + } + + let is_root = ensure_root(origin.clone()).is_ok(); + let calls_len = calls.len(); + ensure!( + calls_len <= Self::batched_calls_limit() as usize, + Error::::TooManyCalls + ); + + // Track the actual weight of each of the batch calls. + let mut weight = Weight::zero(); + for (index, call) in calls.into_iter().enumerate() { + let info = call.get_dispatch_info(); + // If origin is root, don't apply any dispatch filters; root can call anything. + let result = if is_root { + call.dispatch_bypass_filter(origin.clone()) + } else { + call.dispatch(origin.clone()) + }; + // Add the weight of this call. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); + if let Err(e) = result { + Self::deposit_event(Event::BatchInterrupted { + index: index as u32, + error: e.error, + }); + // Take the weight of this function itself into account. 
+ let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); + // Return the actual used weight + base_weight of this call. + return Ok(Some(base_weight.saturating_add(weight)).into()); + } + Self::deposit_event(Event::ItemCompleted); + } + Self::deposit_event(Event::BatchCompleted); + let base_weight = T::WeightInfo::batch(calls_len as u32); + Ok(Some(base_weight.saturating_add(weight)).into()) + } + + /// Send a call through an indexed pseudonym of the sender. + /// + /// Filter from origin are passed along. The call will be dispatched with an origin which + /// use the same filter as the origin of this call. + /// + /// NOTE: If you need to ensure that any account-based filtering is not honored (i.e. + /// because you expect `proxy` to have been used prior in the call stack and you do not want + /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1` + /// in the Multisig pallet instead. + /// + /// NOTE: Prior to version *12, this was called `as_limited_sub`. + /// + /// The dispatch origin for this call must be _Signed_. + #[pallet::call_index(1)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::as_derivative() + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(dispatch_info.weight), + dispatch_info.class, + ) + })] + pub fn as_derivative( + origin: OriginFor, + index: u16, + call: Box<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + let mut origin = origin; + let who = ensure_signed(origin.clone())?; + let pseudonym = Self::derivative_account_id(who, index); + origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); + let info = call.get_dispatch_info(); + let result = call.dispatch(origin); + // Always take into account the base weight of this call. 
+ let mut weight = T::WeightInfo::as_derivative() + .saturating_add(T::DbWeight::get().reads_writes(1, 1)); + // Add the real weight of the dispatch. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); + result + .map_err(|mut err| { + err.post_info = Some(weight).into(); + err + }) + .map(|_| Some(weight).into()) + } + + /// Send a batch of dispatch calls and atomically execute them. + /// The whole transaction will rollback and fail if any of the calls failed. + /// + /// May be called from any origin except `None`. + /// + /// - `calls`: The calls to be dispatched from the same origin. The number of call must not + /// exceed the constant: `batched_calls_limit` (available in constant metadata). + /// + /// If origin is root then the calls are dispatched without checking origin filter. (This + /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// + /// ## Complexity + /// - O(C) where C is the number of calls to be batched. + #[pallet::call_index(2)] + #[pallet::weight({ + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); + (dispatch_weight, dispatch_class) + })] + pub fn batch_all( + origin: OriginFor, + calls: Vec<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()); + } + + let is_root = ensure_root(origin.clone()).is_ok(); + let calls_len = calls.len(); + ensure!( + calls_len <= Self::batched_calls_limit() as usize, + Error::::TooManyCalls + ); + + // Track the actual weight of each of the batch calls. + let mut weight = Weight::zero(); + for (index, call) in calls.into_iter().enumerate() { + let info = call.get_dispatch_info(); + // If origin is root, bypass any dispatch filter; root can call anything. 
+ let result = if is_root { + call.dispatch_bypass_filter(origin.clone()) + } else { + let mut filtered_origin = origin.clone(); + // Don't allow users to nest `batch_all` calls. + filtered_origin.add_filter( + move |c: &::RuntimeCall| { + let c = ::RuntimeCall::from_ref(c); + !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) + }, + ); + call.dispatch(filtered_origin) + }; + // Add the weight of this call. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); + result.map_err(|mut err| { + // Take the weight of this function itself into account. + let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); + // Return the actual used weight + base_weight of this call. + err.post_info = Some(base_weight.saturating_add(weight)).into(); + err + })?; + Self::deposit_event(Event::ItemCompleted); + } + Self::deposit_event(Event::BatchCompleted); + let base_weight = T::WeightInfo::batch_all(calls_len as u32); + Ok(Some(base_weight.saturating_add(weight)).into()) + } + + /// Dispatches a function call with a provided origin. + /// + /// The dispatch origin for this call must be _Root_. + /// + /// ## Complexity + /// - O(1). + #[pallet::call_index(3)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::dispatch_as() + .saturating_add(dispatch_info.weight), + dispatch_info.class, + ) + })] + pub fn dispatch_as( + origin: OriginFor, + as_origin: Box, + call: Box<::RuntimeCall>, + ) -> DispatchResult { + ensure_root(origin)?; + + let res = call.dispatch_bypass_filter((*as_origin).into()); + + Self::deposit_event(Event::DispatchedAs { + result: res.map(|_| ()).map_err(|e| e.error), + }); + Ok(()) + } + + /// Send a batch of dispatch calls. + /// Unlike `batch`, it allows errors and won't interrupt. + /// + /// May be called from any origin except `None`. + /// + /// - `calls`: The calls to be dispatched from the same origin. 
The number of call must not + /// exceed the constant: `batched_calls_limit` (available in constant metadata). + /// + /// If origin is root then the calls are dispatch without checking origin filter. (This + /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// + /// ## Complexity + /// - O(C) where C is the number of calls to be batched. + #[pallet::call_index(4)] + #[pallet::weight({ + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); + (dispatch_weight, dispatch_class) + })] + pub fn force_batch( + origin: OriginFor, + calls: Vec<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()); + } + + let is_root = ensure_root(origin.clone()).is_ok(); + let calls_len = calls.len(); + ensure!( + calls_len <= Self::batched_calls_limit() as usize, + Error::::TooManyCalls + ); + + // Track the actual weight of each of the batch calls. + let mut weight = Weight::zero(); + // Track failed dispatch occur. + let mut has_error: bool = false; + for call in calls.into_iter() { + let info = call.get_dispatch_info(); + // If origin is root, don't apply any dispatch filters; root can call anything. + let result = if is_root { + call.dispatch_bypass_filter(origin.clone()) + } else { + call.dispatch(origin.clone()) + }; + // Add the weight of this call. 
+ weight = weight.saturating_add(extract_actual_weight(&result, &info)); + if let Err(e) = result { + has_error = true; + Self::deposit_event(Event::ItemFailed { error: e.error }); + } else { + Self::deposit_event(Event::ItemCompleted); + } + } + if has_error { + Self::deposit_event(Event::BatchCompletedWithErrors); + } else { + Self::deposit_event(Event::BatchCompleted); + } + let base_weight = T::WeightInfo::batch(calls_len as u32); + Ok(Some(base_weight.saturating_add(weight)).into()) + } + + /// Dispatch a function call with a specified weight. + /// + /// This function does not check the weight of the call, and instead allows the + /// Root origin to specify the weight of the call. + /// + /// The dispatch origin for this call must be _Root_. + #[pallet::call_index(5)] + #[pallet::weight((*weight, call.get_dispatch_info().class))] + pub fn with_weight( + origin: OriginFor, + call: Box<::RuntimeCall>, + weight: Weight, + ) -> DispatchResult { + ensure_root(origin)?; + let _ = weight; // Explicitly don't check the the weight witness. + + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + res.map(|_| ()).map_err(|e| e.error) + } + } + + impl Pallet { + /// Get the accumulated `weight` and the dispatch class for the given `calls`. + fn weight_and_dispatch_class( + calls: &[::RuntimeCall], + ) -> (Weight, DispatchClass) { + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()); + let (dispatch_weight, dispatch_class) = dispatch_infos.fold( + (Weight::zero(), DispatchClass::Operational), + |(total_weight, dispatch_class), di| { + ( + if di.pays_fee == Pays::Yes { + total_weight.saturating_add(di.weight) + } else { + total_weight + }, + if di.class == DispatchClass::Normal { + di.class + } else { + dispatch_class + }, + ) + }, + ); + + (dispatch_weight, dispatch_class) + } + } +} + +/// A pallet identifier. These are per pallet and should be stored in a registry somewhere. 
+#[freeze_struct("7e600c53ace0630a")] +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] +struct IndexedUtilityPalletId(u16); + +impl TypeId for IndexedUtilityPalletId { + const TYPE_ID: [u8; 4] = *b"suba"; +} + +impl Pallet { + /// Derive a derivative account ID from the owner account and the sub-account index. + pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { + let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); + Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .expect("infinite length input; no invalid inputs for type; qed") + } +} diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs new file mode 100644 index 0000000000..34ed8d323e --- /dev/null +++ b/pallets/utility/src/tests.rs @@ -0,0 +1,1002 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Tests for Utility Pallet + +#![cfg(test)] +#![allow(clippy::arithmetic_side_effects)] + +use super::*; + +use crate as utility; +use frame_support::{ + assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, + dispatch::{DispatchErrorWithPostInfo, Pays}, + parameter_types, storage, + traits::{ConstU64, Contains}, + weights::Weight, +}; +use pallet_collective::{EnsureProportionAtLeast, Instance1}; +use sp_runtime::{ + traits::{BadOrigin, BlakeTwo256, Dispatchable, Hash}, + BuildStorage, DispatchError, TokenError, +}; + +type BlockNumber = u64; + +// example module to test behaviors. +#[frame_support::pallet(dev_mode)] +#[allow(clippy::large_enum_variant)] +pub mod example { + use frame_support::{dispatch::WithPostDispatchInfo, pallet_prelude::*}; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(*_weight)] + pub fn noop(_origin: OriginFor, _weight: Weight) -> DispatchResult { + Ok(()) + } + + #[pallet::call_index(1)] + #[pallet::weight(*_start_weight)] + pub fn foobar( + origin: OriginFor, + err: bool, + _start_weight: Weight, + end_weight: Option, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + if err { + let error: DispatchError = "The cake is a lie.".into(); + if let Some(weight) = end_weight { + Err(error.with_weight(weight)) + } else { + Err(error)? 
+ } + } else { + Ok(end_weight.into()) + } + } + + #[pallet::call_index(2)] + #[pallet::weight(0)] + pub fn big_variant(_origin: OriginFor, _arg: [u8; 400]) -> DispatchResult { + Ok(()) + } + } +} + +mod mock_democracy { + pub use pallet::*; + #[frame_support::pallet(dev_mode)] + pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + Sized { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + type ExternalMajorityOrigin: EnsureOrigin; + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(3)] + #[pallet::weight(0)] + pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { + T::ExternalMajorityOrigin::ensure_origin(origin)?; + Self::deposit_event(Event::::ExternalProposed); + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + ExternalProposed, + } + } +} + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system = 1, + Timestamp: pallet_timestamp = 2, + Balances: pallet_balances = 3, + RootTesting: pallet_root_testing = 4, + Council: pallet_collective:: = 5, + Utility: utility = 6, + Example: example = 7, + Democracy: mock_democracy = 8, + } +); + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::MAX); +} +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = BlockWeights; + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type AccountStore = System; +} + +impl pallet_root_testing::Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<3>; + type WeightInfo = (); +} + +const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; +parameter_types! { + pub const MultisigDepositBase: u64 = 1; + pub const MultisigDepositFactor: u64 = 1; + pub const MaxSignatories: u32 = 3; + pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; + pub const MaxProposals: u32 = 100; + pub const MaxMembers: u32 = 100; + pub MaxProposalWeight: Weight = BlockWeights::get().max_block.saturating_div(2); +} + +pub struct MemberProposals; +impl pallet_collective::CanPropose for MemberProposals { + fn can_propose(who: &u64) -> bool { + [1, 2, 3].contains(who) + } +} + +pub struct MemberVotes; +impl pallet_collective::CanVote for MemberVotes { + fn can_vote(who: &u64) -> bool { + [1, 2, 3].contains(who) + } +} + +pub struct StoredVotingMembers; +impl pallet_collective::GetVotingMembers for StoredVotingMembers { + fn get_count() -> u32 { + 3 + } +} + +type CouncilCollective = pallet_collective::Instance1; +impl pallet_collective::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = 
pallet_collective::PrimeDefaultVote; + type WeightInfo = (); + type SetMembersOrigin = frame_system::EnsureRoot; + type CanPropose = MemberProposals; + type CanVote = MemberVotes; + type GetVotingMembers = StoredVotingMembers; +} + +impl example::Config for Test {} + +pub struct TestBaseCallFilter; +impl Contains for TestBaseCallFilter { + fn contains(c: &RuntimeCall) -> bool { + match *c { + // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) => true, + RuntimeCall::Utility(_) => true, + // For benchmarking, this acts as a noop call + RuntimeCall::System(frame_system::Call::remark { .. }) => true, + // For tests + RuntimeCall::Example(_) => true, + // For council origin tests. + RuntimeCall::Democracy(_) => true, + _ => false, + } + } +} +impl mock_democracy::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ExternalMajorityOrigin = EnsureProportionAtLeast; +} +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = (); +} + +type ExampleCall = example::Call; +type UtilityCall = crate::Call; + +use frame_system::Call as SystemCall; +use pallet_balances::Call as BalancesCall; +use pallet_root_testing::Call as RootTestingCall; +use pallet_timestamp::Call as TimestampCall; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default() + .build_storage() + .expect("Failed to build storage for test"); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], + } + .assimilate_storage(&mut t) + .expect("Failed to build storage for test"); + + pallet_collective::GenesisConfig:: { + members: vec![1, 2, 3], + phantom: Default::default(), + } + .assimilate_storage(&mut t) + .expect("Failed to build storage for test"); + + let mut ext = sp_io::TestExternalities::new(t); + 
ext.execute_with(|| System::set_block_number(1)); + ext +} + +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) +} + +fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> RuntimeCall { + RuntimeCall::Example(ExampleCall::foobar { + err, + start_weight, + end_weight, + }) +} + +#[test] +fn as_derivative_works() { + new_test_ext().execute_with(|| { + let sub_1_0 = Utility::derivative_account_id(1, 0); + assert_ok!(Balances::transfer_allow_death( + RuntimeOrigin::signed(1), + sub_1_0, + 5 + )); + assert_err_ignore_postinfo!( + Utility::as_derivative(RuntimeOrigin::signed(1), 1, Box::new(call_transfer(6, 3)),), + TokenError::FundsUnavailable, + ); + assert_ok!(Utility::as_derivative( + RuntimeOrigin::signed(1), + 0, + Box::new(call_transfer(2, 3)), + )); + assert_eq!(Balances::free_balance(sub_1_0), 2); + assert_eq!(Balances::free_balance(2), 13); + }); +} + +#[test] +fn as_derivative_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = Weight::from_parts(100, 0); + let end_weight = Weight::from_parts(75, 0); + let diff = start_weight - end_weight; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); + + 
// Full weight when err + let inner_call = call_foobar(true, start_weight, None); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_noop!( + result, + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + // No weight is refunded + actual_weight: Some(info.weight), + pays_fee: Pays::Yes, + }, + error: DispatchError::Other("The cake is a lie."), + } + ); + + // Refund weight when err + let inner_call = call_foobar(true, start_weight, Some(end_weight)); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_noop!( + result, + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + // Diff is refunded + actual_weight: Some(info.weight - diff), + pays_fee: Pays::Yes, + }, + error: DispatchError::Other("The cake is a lie."), + } + ); + }); +} + +#[test] +fn as_derivative_filters() { + new_test_ext().execute_with(|| { + assert_err_ignore_postinfo!( + Utility::as_derivative( + RuntimeOrigin::signed(1), + 1, + Box::new(RuntimeCall::Balances( + pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 } + )), + ), + DispatchError::from(frame_system::Error::::CallFiltered), + ); + }); +} + +#[test] +fn batch_with_root_works() { + new_test_ext().execute_with(|| { + let k = b"a".to_vec(); + let call = RuntimeCall::System(frame_system::Call::set_storage { + items: vec![(k.clone(), k.clone())], + }); + assert!(!TestBaseCallFilter::contains(&call)); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + RuntimeOrigin::root(), + vec![ + RuntimeCall::Balances(BalancesCall::force_transfer { + source: 1, + dest: 2, + value: 5 + }), + 
RuntimeCall::Balances(BalancesCall::force_transfer { + source: 1, + dest: 2, + value: 5 + }), + call, // Check filters are correctly bypassed + ] + )); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + assert_eq!(storage::unhashed::get_raw(&k), Some(k)); + }); +} + +#[test] +fn batch_with_signed_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + RuntimeOrigin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 5)] + ),); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); +} + +#[test] +fn batch_with_signed_filters() { + new_test_ext().execute_with(|| { + assert_ok!(Utility::batch( + RuntimeOrigin::signed(1), + vec![RuntimeCall::Balances( + pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 } + )] + ),); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), + ); + }); +} + +#[test] +fn batch_early_exit_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + RuntimeOrigin::signed(1), + vec![ + call_transfer(2, 5), + call_transfer(2, 10), + call_transfer(2, 5), + ] + ),); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + }); +} + +#[test] +fn batch_weight_calculation_doesnt_overflow() { + use sp_runtime::Perbill; + new_test_ext().execute_with(|| { + let big_call = RuntimeCall::RootTesting(RootTestingCall::fill_block { + ratio: Perbill::from_percent(50), + }); + assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); + + // 3 * 50% saturates to 100% + let batch_call = RuntimeCall::Utility(crate::Call::batch { + calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], + }); + + 
assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX); + }); +} + +#[test] +fn batch_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = Weight::from_parts(100, 0); + let end_weight = Weight::from_parts(75, 0); + let diff = start_weight - end_weight; + let batch_len = 4; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!( + extract_actual_weight(&result, &info), + info.weight - diff * batch_len + ); + + // Full weight when err + let good_call = call_foobar(false, start_weight, None); + let bad_call = call_foobar(true, start_weight, None); + let batch_calls = vec![good_call, bad_call]; + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 1, + error: DispatchError::Other(""), + } + .into(), + ); + // No weight is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when err + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, 
bad_call]; + let batch_len = batch_calls.len() as u64; + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 1, + error: DispatchError::Other(""), + } + .into(), + ); + assert_eq!( + extract_actual_weight(&result, &info), + info.weight - diff * batch_len + ); + + // Partial batch completion + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call.clone(), bad_call]; + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 1, + error: DispatchError::Other(""), + } + .into(), + ); + assert_eq!( + extract_actual_weight(&result, &info), + // Real weight is 2 calls at end_weight + ::WeightInfo::batch(2) + end_weight * 2, + ); + }); +} + +#[test] +fn batch_all_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch_all( + RuntimeOrigin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 5)] + ),); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); +} + +#[test] +fn batch_all_revert() { + new_test_ext().execute_with(|| { + let call = call_transfer(2, 5); + let info = call.get_dispatch_info(); + + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + let batch_all_calls = RuntimeCall::Utility(crate::Call::::batch_all { + calls: vec![ + call_transfer(2, 5), + call_transfer(2, 10), + call_transfer(2, 5), + ], + }); + assert_noop!( + 
batch_all_calls.dispatch(RuntimeOrigin::signed(1)), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some( + ::WeightInfo::batch_all(2) + info.weight * 2 + ), + pays_fee: Pays::Yes + }, + error: TokenError::FundsUnavailable.into(), + } + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }); +} + +#[test] +fn batch_all_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = Weight::from_parts(100, 0); + let end_weight = Weight::from_parts(75, 0); + let diff = start_weight - end_weight; + let batch_len = 4; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!( + extract_actual_weight(&result, &info), + info.weight - diff * batch_len + ); + + // Full weight when err + let good_call = call_foobar(false, start_weight, None); + let bad_call = call_foobar(true, start_weight, None); + let batch_calls = vec![good_call, bad_call]; + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + // No weight is refunded + assert_eq!(extract_actual_weight(&result, &info), 
info.weight); + + // Refund weight when err + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call]; + let batch_len = batch_calls.len() as u64; + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + assert_eq!( + extract_actual_weight(&result, &info), + info.weight - diff * batch_len + ); + + // Partial batch completion + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call.clone(), bad_call]; + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + assert_eq!( + extract_actual_weight(&result, &info), + // Real weight is 2 calls at end_weight + ::WeightInfo::batch_all(2).saturating_add(end_weight.saturating_mul(2)), + ); + }); +} + +#[test] +fn batch_all_does_not_nest() { + new_test_ext().execute_with(|| { + let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { + calls: vec![ + call_transfer(2, 1), + call_transfer(2, 1), + call_transfer(2, 1), + ], + }); + + let info = batch_all.get_dispatch_info(); + + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + // A nested batch_all call will not pass the filter, and fail with `BadOrigin`. 
+ assert_noop!( + Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_all.clone()]), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), + pays_fee: Pays::Yes + }, + error: frame_system::Error::::CallFiltered.into(), + } + ); + + // And for those who want to get a little fancy, we check that the filter persists across + // other kinds of dispatch wrapping functions... in this case + // `batch_all(batch(batch_all(..)))` + let batch_nested = RuntimeCall::Utility(UtilityCall::batch { + calls: vec![batch_all], + }); + // Batch will end with `Ok`, but does not actually execute as we can see from the event + // and balances. + assert_ok!(Utility::batch_all( + RuntimeOrigin::signed(1), + vec![batch_nested] + )); + System::assert_has_event( + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }); +} + +#[test] +fn batch_limit() { + new_test_ext().execute_with(|| { + let calls = vec![RuntimeCall::System(SystemCall::remark { remark: vec![] }); 40_000]; + assert_noop!( + Utility::batch(RuntimeOrigin::signed(1), calls.clone()), + Error::::TooManyCalls + ); + assert_noop!( + Utility::batch_all(RuntimeOrigin::signed(1), calls), + Error::::TooManyCalls + ); + }); +} + +#[test] +fn force_batch_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::force_batch( + RuntimeOrigin::signed(1), + vec![ + call_transfer(2, 5), + call_foobar(true, Weight::from_parts(75, 0), None), + call_transfer(2, 10), + call_transfer(2, 5), + ] + )); + System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); + System::assert_has_event( + utility::Event::ItemFailed { + error: DispatchError::Other(""), + } + .into(), + ); + 
assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + + assert_ok!(Utility::force_batch( + RuntimeOrigin::signed(2), + vec![call_transfer(1, 5), call_transfer(1, 5),] + )); + System::assert_last_event(utility::Event::BatchCompleted.into()); + + assert_ok!(Utility::force_batch( + RuntimeOrigin::signed(1), + vec![call_transfer(2, 50),] + ),); + System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); + }); +} + +#[test] +fn none_origin_does_not_work() { + new_test_ext().execute_with(|| { + assert_noop!( + Utility::force_batch(RuntimeOrigin::none(), vec![]), + BadOrigin + ); + assert_noop!(Utility::batch(RuntimeOrigin::none(), vec![]), BadOrigin); + assert_noop!(Utility::batch_all(RuntimeOrigin::none(), vec![]), BadOrigin); + }) +} + +#[test] +fn batch_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + // fails because inherents expect the origin to be none. + assert_ok!(Utility::batch( + RuntimeOrigin::signed(1), + vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] + )); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), + ); + }) +} + +#[test] +fn force_batch_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + // fails because inherents expect the origin to be none. + assert_ok!(Utility::force_batch( + RuntimeOrigin::root(), + vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] + )); + System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); + }) +} + +#[test] +fn batch_all_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { + calls: vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 })], + }); + let info = batch_all.get_dispatch_info(); + + // fails because inherents expect the origin to be none. 
+ assert_noop!( + batch_all.dispatch(RuntimeOrigin::signed(1)), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(info.weight), + pays_fee: Pays::Yes + }, + error: frame_system::Error::::CallFiltered.into(), + } + ); + }) +} + +#[test] +fn batch_works_with_council_origin() { + new_test_ext().execute_with(|| { + let proposal = RuntimeCall::Utility(UtilityCall::batch { + calls: vec![RuntimeCall::Democracy( + mock_democracy::Call::external_propose_majority {}, + )], + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Council::propose( + RuntimeOrigin::signed(1), + Box::new(proposal.clone()), + proposal_len, + 3, + )); + + assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); + + System::set_block_number(4); + + assert_ok!(Council::close( + RuntimeOrigin::root(), + hash, + 0, + proposal_weight, + proposal_len + )); + + System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { + proposal_hash: hash, + result: Ok(()), + })); + }) +} + +#[test] +fn force_batch_works_with_council_origin() { + new_test_ext().execute_with(|| { + let proposal = RuntimeCall::Utility(UtilityCall::force_batch { + calls: vec![RuntimeCall::Democracy( + mock_democracy::Call::external_propose_majority {}, + )], + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Council::propose( + RuntimeOrigin::signed(1), + Box::new(proposal.clone()), + proposal_len, + 3, + )); + + assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); 
+ assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(Council::close( + RuntimeOrigin::root(), + hash, + 0, + proposal_weight, + proposal_len + )); + + System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { + proposal_hash: hash, + result: Ok(()), + })); + }) +} + +#[test] +fn batch_all_works_with_council_origin() { + new_test_ext().execute_with(|| { + assert_ok!(Utility::batch_all( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(3, 3)), + vec![RuntimeCall::Democracy( + mock_democracy::Call::external_propose_majority {} + )] + )); + }) +} + +#[test] +fn with_weight_works() { + new_test_ext().execute_with(|| { + use frame_system::WeightInfo; + let upgrade_code_call = Box::new(RuntimeCall::System( + frame_system::Call::set_code_without_checks { code: vec![] }, + )); + // Weight before is max. + assert_eq!( + upgrade_code_call.get_dispatch_info().weight, + ::SystemWeightInfo::set_code() + ); + assert_eq!( + upgrade_code_call.get_dispatch_info().class, + frame_support::dispatch::DispatchClass::Operational + ); + + let with_weight_call = Call::::with_weight { + call: upgrade_code_call, + weight: Weight::from_parts(123, 456), + }; + // Weight after is set by Root. + assert_eq!( + with_weight_call.get_dispatch_info().weight, + Weight::from_parts(123, 456) + ); + assert_eq!( + with_weight_call.get_dispatch_info().class, + frame_support::dispatch::DispatchClass::Operational + ); + }) +} diff --git a/pallets/utility/src/weights.rs b/pallets/utility/src/weights.rs new file mode 100644 index 0000000000..502f85a3f1 --- /dev/null +++ b/pallets/utility/src/weights.rs @@ -0,0 +1,196 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/utility/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_utility`. +pub trait WeightInfo { + fn batch(c: u32, ) -> Weight; + fn as_derivative() -> Weight; + fn batch_all(c: u32, ) -> Weight; + fn dispatch_as() -> Weight; + fn force_batch(c: u32, ) -> Weight; +} + +/// Weights for `pallet_utility` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_312_000 picoseconds. + Weight::from_parts(2_694_370, 3997) + // Standard Error: 5_055 + .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_263_000 picoseconds. + Weight::from_parts(9_639_000, 3997) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_120_000 picoseconds. 
+ Weight::from_parts(12_948_874, 3997) + // Standard Error: 4_643 + .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_126_000 picoseconds. + Weight::from_parts(7_452_000, 0) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_254_000 picoseconds. + Weight::from_parts(4_879_712, 3997) + // Standard Error: 4_988 + .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_312_000 picoseconds. 
+ Weight::from_parts(2_694_370, 3997) + // Standard Error: 5_055 + .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_263_000 picoseconds. + Weight::from_parts(9_639_000, 3997) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_120_000 picoseconds. + Weight::from_parts(12_948_874, 3997) + // Standard Error: 4_643 + .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_126_000 picoseconds. 
+ Weight::from_parts(7_452_000, 0) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_254_000 picoseconds. + Weight::from_parts(4_879_712, 3997) + // Standard Error: 4_988 + .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } +} diff --git a/precompiles/src/extensions.rs b/precompiles/src/extensions.rs index 90cc85ff03..2d3d65a41c 100644 --- a/precompiles/src/extensions.rs +++ b/precompiles/src/extensions.rs @@ -4,9 +4,10 @@ use alloc::format; use frame_support::dispatch::{GetDispatchInfo, Pays, PostDispatchInfo}; use frame_system::RawOrigin; +use pallet_admin_utils::{PrecompileEnable, PrecompileEnum}; use pallet_evm::{ - AddressMapping, BalanceConverter, ExitError, GasWeightMapping, PrecompileFailure, - PrecompileHandle, + AddressMapping, BalanceConverter, ExitError, GasWeightMapping, Precompile, PrecompileFailure, + PrecompileHandle, PrecompileResult, }; use precompile_utils::EvmResult; use sp_core::{H160, U256, blake2_256}; @@ -90,7 +91,7 @@ pub(crate) trait PrecompileHandleExt: PrecompileHandle { ); } - log::info!("Dispatch succeeded. Post info: {:?}", post_info); + log::debug!("Dispatch succeeded. 
Post info: {:?}", post_info); Ok(()) } @@ -109,7 +110,7 @@ pub(crate) trait PrecompileHandleExt: PrecompileHandle { impl PrecompileHandleExt for T where T: PrecompileHandle {} -pub(crate) trait PrecompileExt> { +pub(crate) trait PrecompileExt>: Precompile { const INDEX: u64; // ss58 public key i.e., the contract sends funds it received to the destination address from @@ -127,8 +128,29 @@ pub(crate) trait PrecompileExt> { hash.into() } + + fn try_execute( + handle: &mut impl PrecompileHandle, + precompile_enum: PrecompileEnum, + ) -> Option + where + R: frame_system::Config + pallet_admin_utils::Config, + { + if PrecompileEnable::::get(&precompile_enum) { + Some(Self::execute(handle)) + } else { + Some(Err(PrecompileFailure::Error { + exit_status: ExitError::Other( + format!("Precompile {:?} is disabled", precompile_enum).into(), + ), + })) + } + } } +// allowing unreachable for the whole module fixes clippy reports about precompile macro +// implementation for `TestPrecompile`, that couldn't be fixed granularly +#[allow(unreachable_code)] #[cfg(test)] mod test { use super::*; @@ -152,4 +174,7 @@ mod test { impl PrecompileExt for TestPrecompile { const INDEX: u64 = 2051; } + + #[precompile_utils::precompile] + impl TestPrecompile {} } diff --git a/precompiles/src/lib.rs b/precompiles/src/lib.rs index 66068a58bb..ed0c2222a2 100644 --- a/precompiles/src/lib.rs +++ b/precompiles/src/lib.rs @@ -6,8 +6,8 @@ use core::marker::PhantomData; use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; use pallet_evm::{ - AddressMapping, ExitError, IsPrecompileResult, Precompile, PrecompileFailure, PrecompileHandle, - PrecompileResult, PrecompileSet, + AddressMapping, IsPrecompileResult, Precompile, PrecompileHandle, PrecompileResult, + PrecompileSet, }; use pallet_evm_precompile_modexp::Modexp; use pallet_evm_precompile_sha3fips::Sha3FIPS256; @@ -17,7 +17,7 @@ use sp_runtime::traits::Dispatchable; use sp_runtime::traits::StaticLookup; use 
subtensor_runtime_common::ProxyType; -use pallet_admin_utils::{PrecompileEnable, PrecompileEnum}; +use pallet_admin_utils::PrecompileEnum; use crate::balance_transfer::*; use crate::ed25519::*; @@ -45,7 +45,7 @@ where + pallet_admin_utils::Config + pallet_subtensor::Config + pallet_proxy::Config, - R::AccountId: From<[u8; 32]> + ByteArray, + R::AccountId: From<[u8; 32]> + ByteArray + Into<[u8; 32]>, ::RuntimeCall: From> + From> + From> @@ -69,7 +69,7 @@ where + pallet_admin_utils::Config + pallet_subtensor::Config + pallet_proxy::Config, - R::AccountId: From<[u8; 32]> + ByteArray, + R::AccountId: From<[u8; 32]> + ByteArray + Into<[u8; 32]>, ::RuntimeCall: From> + From> + From> @@ -111,7 +111,7 @@ where + pallet_admin_utils::Config + pallet_subtensor::Config + pallet_proxy::Config, - R::AccountId: From<[u8; 32]> + ByteArray, + R::AccountId: From<[u8; 32]> + ByteArray + Into<[u8; 32]>, ::RuntimeCall: From> + From> + From> @@ -138,61 +138,25 @@ where } // Subtensor specific precompiles : a if a == hash(BalanceTransferPrecompile::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::BalanceTransfer) { - Some(BalanceTransferPrecompile::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: ExitError::Other( - "Precompile Balance Transfer is disabled".into(), - ), - })) - } + BalanceTransferPrecompile::::try_execute::( + handle, + PrecompileEnum::BalanceTransfer, + ) } a if a == hash(StakingPrecompile::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::Staking) { - Some(StakingPrecompile::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: ExitError::Other("Precompile Staking is disabled".into()), - })) - } + StakingPrecompile::::try_execute::(handle, PrecompileEnum::Staking) } a if a == hash(StakingPrecompileV2::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::Staking) { - Some(StakingPrecompileV2::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: 
ExitError::Other("Precompile Staking is disabled".into()), - })) - } + StakingPrecompileV2::::try_execute::(handle, PrecompileEnum::Staking) } - a if a == hash(SubnetPrecompile::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::Subnet) { - Some(SubnetPrecompile::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: ExitError::Other("Precompile Subnet is disabled".into()), - })) - } + SubnetPrecompile::::try_execute::(handle, PrecompileEnum::Subnet) } a if a == hash(MetagraphPrecompile::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::Metagraph) { - Some(MetagraphPrecompile::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: ExitError::Other("Precompile Metagrah is disabled".into()), - })) - } + MetagraphPrecompile::::try_execute::(handle, PrecompileEnum::Metagraph) } a if a == hash(NeuronPrecompile::::INDEX) => { - if PrecompileEnable::::get(PrecompileEnum::Neuron) { - Some(NeuronPrecompile::::execute(handle)) - } else { - Some(Err(PrecompileFailure::Error { - exit_status: ExitError::Other("Precompile Neuron is disabled".into()), - })) - } + NeuronPrecompile::::try_execute::(handle, PrecompileEnum::Neuron) } _ => None, } diff --git a/precompiles/src/solidity/stakingV2.abi b/precompiles/src/solidity/stakingV2.abi index 21dd2761e4..16adb1d8a8 100644 --- a/precompiles/src/solidity/stakingV2.abi +++ b/precompiles/src/solidity/stakingV2.abi @@ -35,6 +35,30 @@ "stateMutability": "payable", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "getAlphaStakedValidators", + "outputs": [ + { + "internalType": "uint256[]", + "name": "", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -64,6 +88,30 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": 
"bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + } + ], + "name": "getTotalAlphaStaked", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -137,5 +185,71 @@ "outputs": [], "stateMutability": "nonpayable", "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "origin_hotkey", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "destination_hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "origin_netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "destination_netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "moveStake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "destination_coldkey", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "origin_netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "destination_netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferStake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" } ] diff --git a/precompiles/src/solidity/stakingV2.sol b/precompiles/src/solidity/stakingV2.sol index 67ac0cb129..dd033cfca8 100644 --- a/precompiles/src/solidity/stakingV2.sol +++ b/precompiles/src/solidity/stakingV2.sol @@ -20,7 +20,11 @@ interface IStaking { * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is * correctly attributed. 
*/ - function addStake(bytes32 hotkey, uint256 amount, uint256 netuid) external payable; + function addStake( + bytes32 hotkey, + uint256 amount, + uint256 netuid + ) external payable; /** * @dev Removes a subtensor stake `amount` from the specified `hotkey`. @@ -46,6 +50,62 @@ interface IStaking { uint256 netuid ) external; + /** + * @dev Moves a subtensor stake `amount` associated with the `hotkey` to a different hotkey + * `destination_hotkey`. + * + * This function allows external accounts and contracts to move staked TAO from one hotkey to another, + * which effectively calls `move_stake` on the subtensor pallet with specified origin and destination + * hotkeys as parameters being the hashed address mappings of H160 sender address to Substrate ss58 + * address as implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param origin_hotkey The origin hotkey public key (32 bytes). + * @param destination_hotkey The destination hotkey public key (32 bytes). + * @param origin_netuid The subnet to move stake from (uint256). + * @param destination_netuid The subnet to move stake to (uint256). + * @param amount The amount to move in rao. + * + * Requirements: + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * that the stake is correctly attributed. + */ + function moveStake( + bytes32 origin_hotkey, + bytes32 destination_hotkey, + uint256 origin_netuid, + uint256 destination_netuid, + uint256 amount + ) external; + + /** + * @dev Transfer a subtensor stake `amount` associated with the transaction signer to a different coldkey + * `destination_coldkey`. 
+ * + * This function allows external accounts and contracts to transfer staked TAO to another coldkey, + * which effectively calls `transfer_stake` on the subtensor pallet with specified destination + * coldkey as a parameter being the hashed address mapping of H160 sender address to Substrate ss58 + * address as implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param destination_coldkey The destination coldkey public key (32 bytes). + * @param hotkey The hotkey public key (32 bytes). + * @param origin_netuid The subnet to move stake from (uint256). + * @param destination_netuid The subnet to move stake to (uint256). + * @param amount The amount to move in rao. + * + * Requirements: + * - `origin_hotkey` and `destination_hotkey` must be valid hotkeys registered on the network, ensuring + * that the stake is correctly attributed. + */ + function transferStake( + bytes32 destination_coldkey, + bytes32 hotkey, + uint256 origin_netuid, + uint256 destination_netuid, + uint256 amount + ) external; + /** * @dev Returns the amount of RAO staked by the coldkey. * @@ -56,7 +116,9 @@ interface IStaking { * @param coldkey The coldkey public key (32 bytes). * @return The amount of RAO staked by the coldkey. */ - function getTotalColdkeyStake(bytes32 coldkey) external view returns (uint256); + function getTotalColdkeyStake( + bytes32 coldkey + ) external view returns (uint256); /** * @dev Returns the total amount of stake under a hotkey (delegative or otherwise) @@ -68,7 +130,9 @@ interface IStaking { * @param hotkey The hotkey public key (32 bytes). * @return The total amount of RAO staked under the hotkey. 
*/ - function getTotalHotkeyStake(bytes32 hotkey) external view returns (uint256); + function getTotalHotkeyStake( + bytes32 hotkey + ) external view returns (uint256); /** * @dev Returns the stake amount associated with the specified `hotkey` and `coldkey`. @@ -100,4 +164,34 @@ interface IStaking { * @param delegate The public key (32 bytes) of the delegate. */ function removeProxy(bytes32 delegate) external; + + /** + * @dev Returns the validators that have staked alpha under a hotkey. + * + * This function retrieves the validators that have staked alpha under a specific hotkey. + * It is a view function, meaning it does not modify the state of the contract and is free to call. + * + * @param hotkey The hotkey public key (32 bytes). + * @param netuid The subnet the stake is on (uint256). + * @return An array of validators that have staked alpha under the hotkey. + */ + function getAlphaStakedValidators( + bytes32 hotkey, + uint256 netuid + ) external view returns (uint256[] memory); + + /** + * @dev Returns the total amount of alpha staked under a hotkey. + * + * This function retrieves the total amount of alpha staked under a specific hotkey. + * It is a view function, meaning it does not modify the state of the contract and is free to call. + * + * @param hotkey The hotkey public key (32 bytes). + * @param netuid The subnet the stake is on (uint256). + * @return The total amount of alpha staked under the hotkey. 
+ */ + function getTotalAlphaStaked( + bytes32 hotkey, + uint256 netuid + ) external view returns (uint256); } diff --git a/precompiles/src/solidity/subnet.abi b/precompiles/src/solidity/subnet.abi index 3cc16d9df7..e2a3e569da 100644 --- a/precompiles/src/solidity/subnet.abi +++ b/precompiles/src/solidity/subnet.abi @@ -883,5 +883,23 @@ "outputs": [], "stateMutability": "payable", "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + }, + { + "internalType": "bool", + "name": "toggle", + "type": "bool" + } + ], + "name": "toggleTransfers", + "outputs": [], + "stateMutability": "payable", + "type": "function" } ] diff --git a/precompiles/src/staking.rs b/precompiles/src/staking.rs index 9022f45a36..8f797a7476 100644 --- a/precompiles/src/staking.rs +++ b/precompiles/src/staking.rs @@ -25,8 +25,8 @@ // - Precompile checks the result of do_remove_stake and, in case of a failure, reverts the transaction. // +use alloc::vec::Vec; use core::marker::PhantomData; - use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; use frame_system::RawOrigin; use pallet_evm::{ @@ -35,6 +35,7 @@ use pallet_evm::{ use precompile_utils::EvmResult; use sp_core::{H256, U256}; use sp_runtime::traits::{Dispatchable, StaticLookup, UniqueSaturatedInto}; +use sp_std::vec; use subtensor_runtime_common::ProxyType; use crate::{PrecompileExt, PrecompileHandleExt}; @@ -52,7 +53,7 @@ where + pallet_evm::Config + pallet_subtensor::Config + pallet_proxy::Config, - R::AccountId: From<[u8; 32]>, + R::AccountId: From<[u8; 32]> + Into<[u8; 32]>, ::RuntimeCall: From> + From> + GetDispatchInfo @@ -70,7 +71,7 @@ where + pallet_evm::Config + pallet_subtensor::Config + pallet_proxy::Config, - R::AccountId: From<[u8; 32]>, + R::AccountId: From<[u8; 32]> + Into<[u8; 32]>, ::RuntimeCall: From> + From> + GetDispatchInfo @@ -119,7 +120,60 @@ where handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) } + 
#[precompile::public("moveStake(bytes32,bytes32,uint256,uint256,uint256)")] + fn move_stake( + handle: &mut impl PrecompileHandle, + origin_hotkey: H256, + destination_hotkey: H256, + origin_netuid: U256, + destination_netuid: U256, + amount_alpha: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let origin_hotkey = R::AccountId::from(origin_hotkey.0); + let destination_hotkey = R::AccountId::from(destination_hotkey.0); + let origin_netuid = try_u16_from_u256(origin_netuid)?; + let destination_netuid = try_u16_from_u256(destination_netuid)?; + let alpha_amount = amount_alpha.unique_saturated_into(); + let call = pallet_subtensor::Call::::move_stake { + origin_hotkey, + destination_hotkey, + origin_netuid, + destination_netuid, + alpha_amount, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } + + #[precompile::public("transferStake(bytes32,bytes32,uint256,uint256,uint256)")] + fn transfer_stake( + handle: &mut impl PrecompileHandle, + destination_coldkey: H256, + hotkey: H256, + origin_netuid: U256, + destination_netuid: U256, + amount_alpha: U256, + ) -> EvmResult<()> { + let account_id = handle.caller_account_id::(); + let destination_coldkey = R::AccountId::from(destination_coldkey.0); + let hotkey = R::AccountId::from(hotkey.0); + let origin_netuid = try_u16_from_u256(origin_netuid)?; + let destination_netuid = try_u16_from_u256(destination_netuid)?; + let alpha_amount = amount_alpha.unique_saturated_into(); + let call = pallet_subtensor::Call::::transfer_stake { + destination_coldkey, + hotkey, + origin_netuid, + destination_netuid, + alpha_amount, + }; + + handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) + } + #[precompile::public("getTotalColdkeyStake(bytes32)")] + #[precompile::view] fn get_total_coldkey_stake( _handle: &mut impl PrecompileHandle, coldkey: H256, @@ -131,6 +185,7 @@ where } #[precompile::public("getTotalHotkeyStake(bytes32)")] + #[precompile::view] fn 
get_total_hotkey_stake( _handle: &mut impl PrecompileHandle, hotkey: H256, @@ -159,6 +214,41 @@ where Ok(stake.into()) } + #[precompile::public("getAlphaStakedValidators(bytes32,uint256)")] + #[precompile::view] + fn get_alpha_staked_validators( + _handle: &mut impl PrecompileHandle, + hotkey: H256, + netuid: U256, + ) -> EvmResult> { + let hotkey = R::AccountId::from(hotkey.0); + let mut coldkeys: Vec = vec![]; + let netuid = try_u16_from_u256(netuid)?; + for ((coldkey, netuid_in_alpha), _) in pallet_subtensor::Alpha::::iter_prefix((hotkey,)) + { + if netuid == netuid_in_alpha { + let key: [u8; 32] = coldkey.into(); + coldkeys.push(key.into()); + } + } + + Ok(coldkeys) + } + + #[precompile::public("getTotalAlphaStaked(bytes32,uint256)")] + #[precompile::view] + fn get_total_alpha_staked( + _handle: &mut impl PrecompileHandle, + hotkey: H256, + netuid: U256, + ) -> EvmResult { + let hotkey = R::AccountId::from(hotkey.0); + let netuid = try_u16_from_u256(netuid)?; + let stake = pallet_subtensor::Pallet::::get_stake_for_hotkey_on_subnet(&hotkey, netuid); + + Ok(stake.into()) + } + #[precompile::public("addProxy(bytes32)")] fn add_proxy(handle: &mut impl PrecompileHandle, delegate: H256) -> EvmResult<()> { let account_id = handle.caller_account_id::(); @@ -275,6 +365,7 @@ where } #[precompile::public("getTotalColdkeyStake(bytes32)")] + #[precompile::view] fn get_total_coldkey_stake( _handle: &mut impl PrecompileHandle, coldkey: H256, @@ -292,6 +383,7 @@ where } #[precompile::public("getTotalHotkeyStake(bytes32)")] + #[precompile::view] fn get_total_hotkey_stake( _handle: &mut impl PrecompileHandle, hotkey: H256, diff --git a/precompiles/src/subnet.rs b/precompiles/src/subnet.rs index cffe82ab78..e9bfc0c5f9 100644 --- a/precompiles/src/subnet.rs +++ b/precompiles/src/subnet.rs @@ -200,19 +200,12 @@ where #[precompile::public("setWeightsSetRateLimit(uint16,uint64)")] #[precompile::payable] fn set_weights_set_rate_limit( - handle: &mut impl PrecompileHandle, - netuid: 
u16, - weights_set_rate_limit: u64, + _handle: &mut impl PrecompileHandle, + _netuid: u16, + _weights_set_rate_limit: u64, ) -> EvmResult<()> { - let call = pallet_admin_utils::Call::::sudo_set_weights_set_rate_limit { - netuid, - weights_set_rate_limit, - }; - - handle.try_dispatch_runtime_call::( - call, - RawOrigin::Signed(handle.caller_account_id::()), - ) + // DEPRECATED. Subnet owner cannot set weight setting rate limits + Ok(()) } #[precompile::public("getAdjustmentAlpha(uint16)")] @@ -436,16 +429,12 @@ where #[precompile::public("setMinBurn(uint16,uint64)")] #[precompile::payable] fn set_min_burn( - handle: &mut impl PrecompileHandle, - netuid: u16, - min_burn: u64, + _handle: &mut impl PrecompileHandle, + _netuid: u16, + _min_burn: u64, ) -> EvmResult<()> { - let call = pallet_admin_utils::Call::::sudo_set_min_burn { netuid, min_burn }; - - handle.try_dispatch_runtime_call::( - call, - RawOrigin::Signed(handle.caller_account_id::()), - ) + // DEPRECATED. The subnet owner cannot set the min burn anymore. 
+ Ok(()) } #[precompile::public("getMaxBurn(uint16)")] @@ -616,4 +605,19 @@ where RawOrigin::Signed(handle.caller_account_id::()), ) } + + #[precompile::public("toggleTransfers(uint16,bool)")] + #[precompile::payable] + fn toggle_transfers( + handle: &mut impl PrecompileHandle, + netuid: u16, + toggle: bool, + ) -> EvmResult<()> { + let call = pallet_admin_utils::Call::::sudo_set_toggle_transfer { netuid, toggle }; + + handle.try_dispatch_runtime_call::( + call, + RawOrigin::Signed(handle.caller_account_id::()), + ) + } } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 80413dc9f1..5b87dbf03d 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -33,7 +33,7 @@ use pallet_registry::CanRegisterIdentity; use pallet_subtensor::rpc_info::{ delegate_info::DelegateInfo, dynamic_info::DynamicInfo, - metagraph::Metagraph, + metagraph::{Metagraph, SelectiveMetagraph}, neuron_info::{NeuronInfo, NeuronInfoLite}, show_subnet::SubnetState, stake_info::StakeInfo, @@ -89,6 +89,8 @@ pub use sp_runtime::{Perbill, Permill}; use core::marker::PhantomData; +use scale_info::TypeInfo; + // Frontier use fp_rpc::TransactionStatus; use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; @@ -205,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 245, + spec_version: 260, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -918,12 +920,22 @@ impl pallet_registry::Config for Runtime { } parameter_types! 
{ - pub const MaxCommitFields: u32 = 1; + pub const MaxCommitFieldsInner: u32 = 1; pub const CommitmentInitialDeposit: Balance = 0; // Free pub const CommitmentFieldDeposit: Balance = 0; // Free pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } +#[subtensor_macros::freeze_struct("7c76bd954afbb54e")] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MaxCommitFields; +impl Get for MaxCommitFields { + fn get() -> u32 { + MaxCommitFieldsInner::get() + } +} + +#[subtensor_macros::freeze_struct("c39297f5eb97ee82")] pub struct AllowCommitments; impl CanCommit for AllowCommitments { #[cfg(not(feature = "runtime-benchmarks"))] @@ -948,6 +960,20 @@ impl pallet_commitments::Config for Runtime { type InitialDeposit = CommitmentInitialDeposit; type FieldDeposit = CommitmentFieldDeposit; type DefaultRateLimit = CommitmentRateLimit; + type TempoInterface = TempoInterface; +} + +pub struct TempoInterface; +impl pallet_commitments::GetTempoInterface for TempoInterface { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) + } +} + +impl pallet_commitments::GetTempoInterface for Runtime { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) + } } #[cfg(not(feature = "fast-blocks"))] @@ -984,7 +1010,7 @@ parameter_types! { pub const SubtensorInitialMaxRegistrationsPerBlock: u16 = 1; pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; - pub const SubtensorInitialBondsPenalty: u16 = 0; + pub const SubtensorInitialBondsPenalty: u16 = u16::MAX; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take @@ -1017,6 +1043,12 @@ parameter_types! 
{ pub const InitialColdkeySwapScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. + pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = if cfg!(feature = "fast-blocks") { + 10 // Only 10 blocks for fast blocks + } else { + 7 * 24 * 60 * 60 / 12 // 7 days + }; } impl pallet_subtensor::Config for Runtime { @@ -1080,6 +1112,8 @@ impl pallet_subtensor::Config for Runtime { type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; + type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } use sp_runtime::BoundedVec; @@ -2050,6 +2084,11 @@ impl_runtime_apis! { fn get_all_dynamic_info() -> Vec>> { SubtensorModule::get_all_dynamic_info() } + + fn get_selective_metagraph(netuid: u16, metagraph_indexes: Vec) -> Option> { + SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) + } + } impl subtensor_custom_rpc_runtime_api::StakeInfoRuntimeApi for Runtime { @@ -2064,6 +2103,10 @@ impl_runtime_apis! 
{ fn get_stake_info_for_hotkey_coldkey_netuid( hotkey_account: AccountId32, coldkey_account: AccountId32, netuid: u16 ) -> Option> { SubtensorModule::get_stake_info_for_hotkey_coldkey_netuid( hotkey_account, coldkey_account, netuid ) } + + fn get_stake_fee( origin: Option<(AccountId32, u16)>, origin_coldkey_account: AccountId32, destination: Option<(AccountId32, u16)>, destination_coldkey_account: AccountId32, amount: u64 ) -> u64 { + SubtensorModule::get_stake_fee( origin, origin_coldkey_account, destination, destination_coldkey_account, amount ) + } } impl subtensor_custom_rpc_runtime_api::SubnetRegistrationRuntimeApi for Runtime { diff --git a/scripts/localnet.sh b/scripts/localnet.sh index b82b5f9f59..4de17f1521 100755 --- a/scripts/localnet.sh +++ b/scripts/localnet.sh @@ -2,10 +2,15 @@ # Check if `--no-purge` passed as a parameter NO_PURGE=0 + +# Check if `--build-only` passed as parameter +BUILD_ONLY=0 + for arg in "$@"; do if [ "$arg" = "--no-purge" ]; then NO_PURGE=1 - break + elif [ "$arg" = "--build-only" ]; then + BUILD_ONLY=1 fi done @@ -15,94 +20,105 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" # The base directory of the subtensor project BASE_DIR="$SCRIPT_DIR/.." 
-# get parameters # Get the value of fast_blocks from the first argument fast_blocks=${1:-"True"} -# Check the value of fast_blocks +# Define the target directory for compilation if [ "$fast_blocks" == "False" ]; then # Block of code to execute if fast_blocks is False echo "fast_blocks is Off" : "${CHAIN:=local}" : "${BUILD_BINARY:=1}" : "${FEATURES:="pow-faucet"}" + BUILD_DIR="$BASE_DIR/target/non-fast-blocks" else # Block of code to execute if fast_blocks is not False echo "fast_blocks is On" : "${CHAIN:=local}" : "${BUILD_BINARY:=1}" : "${FEATURES:="pow-faucet fast-blocks"}" + BUILD_DIR="$BASE_DIR/target/fast-blocks" fi +# Ensure the build directory exists +mkdir -p "$BUILD_DIR" + SPEC_PATH="${SCRIPT_DIR}/specs/" FULL_PATH="$SPEC_PATH$CHAIN.json" -# Kill any existing nodes which may have not exited correctly after a previous -# run. +# Kill any existing nodes which may have not exited correctly after a previous run. pkill -9 'node-subtensor' if [ ! -d "$SPEC_PATH" ]; then echo "*** Creating directory ${SPEC_PATH}..." - mkdir $SPEC_PATH + mkdir -p "$SPEC_PATH" fi if [[ $BUILD_BINARY == "1" ]]; then echo "*** Building substrate binary..." - cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" + CARGO_TARGET_DIR="$BUILD_DIR" cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" echo "*** Binary compiled" fi echo "*** Building chainspec..." 
-"$BASE_DIR/target/release/node-subtensor" build-spec --disable-default-bootnode --raw --chain $CHAIN >$FULL_PATH +"$BUILD_DIR/release/node-subtensor" build-spec --disable-default-bootnode --raw --chain "$CHAIN" >"$FULL_PATH" echo "*** Chainspec built and output to file" -# generate node keys -$BASE_DIR/target/release/node-subtensor key generate-node-key --chain="$FULL_PATH" --base-path /tmp/alice -$BASE_DIR/target/release/node-subtensor key generate-node-key --chain="$FULL_PATH" --base-path /tmp/bob +# Generate node keys +"$BUILD_DIR/release/node-subtensor" key generate-node-key --chain="$FULL_PATH" --base-path /tmp/alice +"$BUILD_DIR/release/node-subtensor" key generate-node-key --chain="$FULL_PATH" --base-path /tmp/bob if [ $NO_PURGE -eq 1 ]; then echo "*** Purging previous state skipped..." else echo "*** Purging previous state..." - "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/bob --chain="$FULL_PATH" >/dev/null 2>&1 - "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/alice --chain="$FULL_PATH" >/dev/null 2>&1 + "$BUILD_DIR/release/node-subtensor" purge-chain -y --base-path /tmp/bob --chain="$FULL_PATH" >/dev/null 2>&1 + "$BUILD_DIR/release/node-subtensor" purge-chain -y --base-path /tmp/alice --chain="$FULL_PATH" >/dev/null 2>&1 echo "*** Previous chainstate purged" fi -echo "*** Starting localnet nodes..." 
-alice_start=( - "$BASE_DIR/target/release/node-subtensor" - --base-path /tmp/alice - --chain="$FULL_PATH" - --alice - --port 30334 - --rpc-port 9944 - --validator - --rpc-cors=all - --allow-private-ipv4 - --discover-local - --unsafe-force-node-key-generation -) - -bob_start=( - "$BASE_DIR"/target/release/node-subtensor - --base-path /tmp/bob - --chain="$FULL_PATH" - --bob - --port 30335 - --rpc-port 9945 - --validator - --rpc-cors=all - --allow-private-ipv4 - --discover-local - --unsafe-force-node-key-generation -# --offchain-worker=Never -) - -trap 'pkill -P $$' EXIT SIGINT SIGTERM - -( - ("${alice_start[@]}" 2>&1) & - ("${bob_start[@]}" 2>&1) - wait -) +if [ $BUILD_ONLY -eq 0 ]; then + echo "*** Starting localnet nodes..." + + alice_start=( + "$BUILD_DIR/release/node-subtensor" + --base-path /tmp/alice + --chain="$FULL_PATH" + --alice + --port 30334 + --rpc-port 9944 + --validator + --rpc-cors=all + --allow-private-ipv4 + --discover-local + --unsafe-force-node-key-generation + ) + + bob_start=( + "$BUILD_DIR/release/node-subtensor" + --base-path /tmp/bob + --chain="$FULL_PATH" + --bob + --port 30335 + --rpc-port 9945 + --validator + --rpc-cors=all + --allow-private-ipv4 + --discover-local + --unsafe-force-node-key-generation + ) + + # Provide RUN_IN_DOCKER local environment variable if run script in the docker image + if [ "${RUN_IN_DOCKER}" == "1" ]; then + alice_start+=(--unsafe-rpc-external) + bob_start+=(--unsafe-rpc-external) + fi + + trap 'pkill -P $$' EXIT SIGINT SIGTERM + + ( + ("${alice_start[@]}" 2>&1) & + ("${bob_start[@]}" 2>&1) + wait + ) +fi