diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml new file mode 100644 index 0000000000..1a574eb1d8 --- /dev/null +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -0,0 +1,292 @@ +name: Bittensor Bittensor E2E Test + +permissions: + pull-requests: write + contents: read + +concurrency: + group: e2e-cli-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + branches: + - devnet + - devnet-ready + - testnet + - testnet-ready + - main + types: [opened, synchronize, reopened, labeled, unlabeled] + +env: + CARGO_TERM_COLOR: always + VERBOSE: ${{ github.event.inputs.verbose }} + +jobs: + apply-label-to-new-pr: + runs-on: ubuntu-latest + if: ${{ github.event.pull_request.draft == false }} + outputs: + should_continue: ${{ steps.check.outputs.should_continue }} + steps: + - name: Check + id: check + run: | + ACTION="${{ github.event.action }}" + if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then + echo "should_continue=true" >> $GITHUB_OUTPUT + else + echo "should_continue=false" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Add label + if: steps.check.outputs.should_continue == 'true' + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + labels: run-bittensor-e2e-tests + + check-label: + needs: apply-label-to-new-pr + runs-on: ubuntu-latest + if: always() + outputs: + run-bittensor-e2e-tests: ${{ steps.get-labels.outputs.run-bittensor-e2e-tests }} + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Get labels from PR + id: get-labels + run: | + LABELS=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') + echo "Current labels: $LABELS" + if echo "$LABELS" | grep -q "run-bittensor-e2e-tests"; then + echo "run-bittensor-e2e-tests=true" >> $GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::true" + else + echo "run-bittensor-e2e-tests=false" >> 
$GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::false" + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + find-btcli-e2e-tests: + needs: check-label + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + outputs: + test-files: ${{ steps.get-btcli-tests.outputs.test-files }} + steps: + - name: Research preparation + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/btcli.git + + - name: Checkout + working-directory: ${{ github.workspace }}/btcli + run: git checkout staging + + - name: Install dependencies + run: sudo apt-get install -y jq + + - name: Find e2e test files + id: get-btcli-tests + run: | + test_files=$(find ${{ github.workspace }}/btcli/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + find-sdk-e2e-tests: + needs: check-label + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + outputs: + test-files: ${{ steps.get-sdk-tests.outputs.test-files }} + steps: + - name: Research preparation + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/bittensor.git + + - name: Checkout + working-directory: ${{ github.workspace }}/bittensor + run: git checkout staging + + - name: Install dependencies + run: sudo apt-get install -y jq + + - name: Find e2e test files + id: get-sdk-tests + run: | + test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + build-image-with-current-branch: + needs: check-label + runs-on: SubtensorCI + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker Image + run: docker build -f Dockerfile-localnet -t localnet . + + - name: Save Docker Image as Tar + run: docker save -o subtensor-localnet.tar localnet + + - name: Upload Docker Image as Artifact + uses: actions/upload-artifact@v4 + with: + name: subtensor-localnet + path: subtensor-localnet.tar + + # main btcli job + run-btcli-e2e-tests: + needs: + - check-label + - find-btcli-e2e-tests + - build-image-with-current-branch + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rust-branch: + - stable + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-btcli-e2e-tests.outputs.test-files) }} + + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + + timeout-minutes: 60 + name: "cli: ${{ matrix.test-file }}" + steps: + - name: Check-out repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }} + run: uv venv ${{ github.workspace }}/venv + + - name: Clone Bittensor CLI repo + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/btcli.git + + - name: Setup Bittensor-cli from cloned repo + working-directory: ${{ github.workspace }}/btcli + run: | + source ${{ github.workspace }}/venv/bin/activate + git checkout staging + git fetch origin staging + uv run 
--active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run --active pip install pytest + + - name: Install uv dependencies + working-directory: ${{ github.workspace }}/btcli + run: uv sync --all-extras --dev + + - name: Download Cached Docker Image + uses: actions/download-artifact@v4 + with: + name: subtensor-localnet + + - name: Load Docker Image + run: docker load -i subtensor-localnet.tar + + - name: Run tests + working-directory: ${{ github.workspace }}/btcli + run: | + source ${{ github.workspace }}/venv/bin/activate + uv run pytest ${{ matrix.test-file }} -s + + # main sdk job + run-sdk-e2e-tests: + needs: + - check-label + - find-sdk-e2e-tests + - build-image-with-current-branch + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rust-branch: + - stable + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-sdk-e2e-tests.outputs.test-files) }} + + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + + timeout-minutes: 60 + name: "sdk: ${{ matrix.test-file }}" + steps: + - name: Check-out repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }} + run: uv venv ${{ github.workspace }}/venv + + - name: Clone Bittensor SDK repo + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/bittensor.git + + - name: Setup Bittensor SDK from cloned repo + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + git checkout staging + git fetch origin staging + uv run --active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run 
--active pip install pytest + + - name: Install uv dependencies + working-directory: ${{ github.workspace }}/bittensor + run: uv sync --all-extras --dev + + - name: Download Cached Docker Image + uses: actions/download-artifact@v4 + with: + name: subtensor-localnet + + - name: Load Docker Image + run: docker load -i subtensor-localnet.tar + + - name: Run tests + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + uv run pytest ${{ matrix.test-file }} -s \ No newline at end of file diff --git a/.github/workflows/check-btcli-tests.yml b/.github/workflows/check-btcli-tests.yml deleted file mode 100644 index 1307774742..0000000000 --- a/.github/workflows/check-btcli-tests.yml +++ /dev/null @@ -1,191 +0,0 @@ -name: Bittensor BTCLI Test - -permissions: - pull-requests: write - contents: read - -concurrency: - group: e2e-cli-${{ github.ref }} - cancel-in-progress: true - -on: - pull_request: - branches: - - devnet - - devnet-ready - - testnet - - testnet-ready - - main - types: [opened, synchronize, reopened, labeled, unlabeled] - -env: - CARGO_TERM_COLOR: always - VERBOSE: ${{ github.event.inputs.verbose }} - -jobs: - apply-label-to-new-pr: - runs-on: ubuntu-latest - if: ${{ github.event.pull_request.draft == false }} - outputs: - should_continue_cli: ${{ steps.check.outputs.should_continue_cli }} - steps: - - name: Check - id: check - run: | - ACTION="${{ github.event.action }}" - if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then - echo "should_continue_cli=true" >> $GITHUB_OUTPUT - else - echo "should_continue_cli=false" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Add label - if: steps.check.outputs.should_continue_cli == 'true' - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - labels: run-bittensor-cli-tests - - check-labels: - needs: apply-label-to-new-pr - runs-on: ubuntu-latest - if: always() - outputs: - run-cli-tests: ${{ 
steps.get-labels.outputs.run-cli-tests }} - steps: - - name: Check out repository - uses: actions/checkout@v4 - - - name: Get labels from PR - id: get-labels - run: | - LABELS=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') - echo "Current labels: $LABELS" - if echo "$LABELS" | grep -q "run-bittensor-cli-tests"; then - echo "run-cli-tests=true" >> $GITHUB_ENV - echo "::set-output name=run-cli-tests::true" - else - echo "run-cli-tests=false" >> $GITHUB_ENV - echo "::set-output name=run-cli-tests::false" - fi - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - find-e2e-tests: - needs: check-labels - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' - runs-on: ubuntu-latest - outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} - steps: - - name: Research preparation - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/btcli.git - - - name: Checkout - working-directory: ${{ github.workspace }}/btcli - run: git checkout staging - - - name: Install dependencies - run: sudo apt-get install -y jq - - - name: Find e2e test files - id: get-tests - run: | - test_files=$(find ${{ github.workspace }}/btcli/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') - echo "::set-output name=test-files::$test_files" - shell: bash - - pull-docker-image: - needs: check-labels - runs-on: ubuntu-latest - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' - steps: - - name: Log in to GitHub Container Registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $GITHUB_ACTOR --password-stdin - - - name: Pull Docker Image - run: docker pull ghcr.io/opentensor/subtensor-localnet:latest - - - name: Save Docker Image to Cache - run: docker save -o subtensor-localnet.tar ghcr.io/opentensor/subtensor-localnet:latest - - - name: Upload Docker Image as Artifact - uses: actions/upload-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar - - # main job - run-e2e-tests: - needs: - - check-labels - - find-e2e-tests - - pull-docker-image - - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' - runs-on: ubuntu-latest - strategy: - fail-fast: false - max-parallel: 16 - matrix: - rust-branch: - - stable - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} - - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} - - timeout-minutes: 60 - name: "cli: ${{ matrix.test-file }}" - steps: - - name: Check-out repository - uses: actions/checkout@v4 - - - name: Install uv - uses: astral-sh/setup-uv@v5 - - - name: Create Python virtual environment - working-directory: ${{ github.workspace }} - run: uv venv ${{ github.workspace }}/venv - - - name: Clone Bittensor CLI repo - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/btcli.git - - - name: Setup Bittensor-cli from cloned repo - working-directory: ${{ github.workspace }}/btcli - run: | - source ${{ github.workspace }}/venv/bin/activate - git checkout staging - git fetch origin 
staging - uv run --active pip install --upgrade pip - uv run --active pip install '.[dev]' - uv run --active pip install pytest - - - name: Install uv dependencies - working-directory: ${{ github.workspace }}/btcli - run: uv sync --all-extras --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet - - - name: Load Docker Image - run: docker load -i subtensor-localnet.tar - - - name: Run tests - working-directory: ${{ github.workspace }}/btcli - run: | - source ${{ github.workspace }}/venv/bin/activate - uv run pytest ${{ matrix.test-file }} -s diff --git a/.github/workflows/check-docker-localnet.yml b/.github/workflows/check-docker-localnet.yml deleted file mode 100644 index 126b718d8c..0000000000 --- a/.github/workflows/check-docker-localnet.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Build Localnet Docker Image - -on: - pull_request: - -jobs: - build: - runs-on: SubtensorCI - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build Docker Image - run: docker build -f Dockerfile-localnet -t localnet . 
diff --git a/.github/workflows/check-sdk-tests.yml b/.github/workflows/check-sdk-tests.yml deleted file mode 100644 index d54308c17b..0000000000 --- a/.github/workflows/check-sdk-tests.yml +++ /dev/null @@ -1,190 +0,0 @@ -name: Bittensor SDK Test - -permissions: - pull-requests: write - contents: read - -concurrency: - group: e2e-sdk-${{ github.ref }} - cancel-in-progress: true - -on: - pull_request: - branches: - - devnet - - devnet-ready - - testnet - - testnet-ready - - main - types: [opened, synchronize, reopened, labeled, unlabeled] - -env: - CARGO_TERM_COLOR: always - VERBOSE: ${{ github.event.inputs.verbose }} - -jobs: - apply-label-to-new-pr: - runs-on: ubuntu-latest - if: ${{ github.event.pull_request.draft == false }} - outputs: - should_continue_sdk: ${{ steps.check.outputs.should_continue_sdk }} - steps: - - name: Check - id: check - run: | - ACTION="${{ github.event.action }}" - if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then - echo "should_continue_sdk=true" >> $GITHUB_OUTPUT - else - echo "should_continue_sdk=false" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Add label - if: steps.check.outputs.should_continue_sdk == 'true' - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - labels: run-bittensor-sdk-tests - - check-labels: - needs: apply-label-to-new-pr - runs-on: ubuntu-latest - if: always() - outputs: - run-sdk-tests: ${{ steps.get-labels.outputs.run-sdk-tests }} - steps: - - name: Check out repository - uses: actions/checkout@v4 - - - name: Get labels from PR - id: get-labels - run: | - sleep 5 - LABELS=$(gh api repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels --jq '.[].name') - echo "Current labels: $LABELS" - if echo "$LABELS" | grep -q "run-bittensor-sdk-tests"; then - echo "run-sdk-tests=true" >> $GITHUB_OUTPUT - else - echo "run-sdk-tests=false" >> $GITHUB_OUTPUT - fi - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - find-e2e-tests: - 
needs: check-labels - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - runs-on: ubuntu-latest - outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} - steps: - - name: Research preparation - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/bittensor.git - - - name: Checkout - working-directory: ${{ github.workspace }}/bittensor - run: git checkout staging - - - name: Install dependencies - run: sudo apt-get install -y jq - - - name: Find e2e test files - id: get-tests - run: | - test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') - echo "::set-output name=test-files::$test_files" - shell: bash - - pull-docker-image: - needs: check-labels - runs-on: ubuntu-latest - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - steps: - - name: Log in to GitHub Container Registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $GITHUB_ACTOR --password-stdin - - - name: Pull Docker Image - run: docker pull ghcr.io/opentensor/subtensor-localnet:devnet-ready - - - name: Save Docker Image to Cache - run: docker save -o subtensor-localnet.tar ghcr.io/opentensor/subtensor-localnet:devnet-ready - - - name: Upload Docker Image as Artifact - uses: actions/upload-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar - - # main job - run-e2e-tests: - needs: - - check-labels - - find-e2e-tests - - pull-docker-image - - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - runs-on: ubuntu-latest - strategy: - fail-fast: false - max-parallel: 16 - matrix: - rust-branch: - - stable - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} - - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - 
TARGET: ${{ matrix.rust-target }} - - timeout-minutes: 60 - name: "sdk: ${{ matrix.test-file }}" - steps: - - name: Check-out repository - uses: actions/checkout@v4 - - - name: Install uv - uses: astral-sh/setup-uv@v5 - - - name: Create Python virtual environment - working-directory: ${{ github.workspace }} - run: uv venv ${{ github.workspace }}/venv - - - name: Clone Bittensor SDK repo - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/bittensor.git - - - name: Setup Bittensor SDK from cloned repo - working-directory: ${{ github.workspace }}/bittensor - run: | - source ${{ github.workspace }}/venv/bin/activate - git checkout staging - git fetch origin staging - uv run --active pip install --upgrade pip - uv run --active pip install '.[dev]' - uv run --active pip install pytest - - - name: Install uv dependencies - working-directory: ${{ github.workspace }}/bittensor - run: uv sync --all-extras --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet - - - name: Load Docker Image - run: docker load -i subtensor-localnet.tar - - - name: Run tests - working-directory: ${{ github.workspace }}/bittensor - run: | - source ${{ github.workspace }}/venv/bin/activate - uv run pytest ${{ matrix.test-file }} -s diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index cd6460bfe4..c2afccae66 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -45,20 +45,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - 
uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . file: Dockerfile-localnet diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2b36e37282..3eb52ab86f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -47,20 +47,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . push: true @@ -93,20 +93,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . 
push: true diff --git a/Cargo.lock b/Cargo.lock index 385175fcff..51a10fdf02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6089,18 +6089,27 @@ dependencies = [ name = "pallet-commitments" version = "4.0.0-dev" dependencies = [ + "ark-serialize", "enumflags2", "frame-benchmarking", "frame-support", "frame-system", + "hex", + "log", "pallet-balances", + "pallet-drand", + "pallet-subtensor", "parity-scale-codec", + "rand_chacha", "scale-info", + "sha2 0.10.8", "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "subtensor-macros", + "tle", + "w3f-bls", ] [[package]] diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 0efaf5b47a..0de11cb866 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -62,4 +62,6 @@ ENV RUN_IN_DOCKER=1 EXPOSE 30334 30335 9944 9945 ENTRYPOINT ["/scripts/localnet.sh"] +# Fast blocks defaults to True, you can disable it by passing False to the docker command, e.g.: +# docker run ghcr.io/opentensor/subtensor-localnet False CMD ["True"] diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index fc0d016198..99c11b7165 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -86,7 +86,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty: u16 = 0; + pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. @@ -135,6 +135,7 @@ parameter_types! { pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialTaoWeight: u64 = u64::MAX/10; // 10% global weight. 
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // 7 days } impl pallet_subtensor::Config for Test { @@ -199,6 +200,7 @@ impl pallet_subtensor::Config for Test { type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index 7fb22aa1fb..7b2f49ace8 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -29,6 +29,18 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } enumflags2 = { workspace = true } +pallet-drand = { path = "../drand", default-features = false } +tle = { workspace = true, default-features = false } +ark-serialize = { workspace = true, default-features = false } +w3f-bls = { workspace = true, default-features = false } +rand_chacha = { workspace = true } +hex = { workspace = true } +sha2 = { workspace = true } + +log = { workspace = true } + +pallet-subtensor = { path = "../subtensor", default-features = false } + [dev-dependencies] sp-core = { workspace = true } sp-io = { workspace = true } @@ -47,18 +59,31 @@ std = [ "enumflags2/std", "pallet-balances/std", "sp-core/std", - "sp-io/std" + "sp-io/std", + "ark-serialize/std", + "log/std", + "pallet-drand/std", + "tle/std", + "w3f-bls/std", + "hex/std", + "rand_chacha/std", + "sha2/std", + "pallet-subtensor/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-balances/runtime-benchmarks" + "pallet-balances/runtime-benchmarks", + "pallet-drand/runtime-benchmarks", + "pallet-subtensor/runtime-benchmarks" ] try-runtime = [ 
"frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", - "sp-runtime/try-runtime" + "sp-runtime/try-runtime", + "pallet-drand/try-runtime", + "pallet-subtensor/try-runtime" ] diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index ba11dbe52a..a62084de5c 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -4,6 +4,9 @@ mod benchmarking; #[cfg(test)] mod tests; +#[cfg(test)] +mod mock; + pub mod types; pub mod weights; @@ -12,9 +15,18 @@ use subtensor_macros::freeze_struct; pub use types::*; pub use weights::WeightInfo; -use frame_support::traits::Currency; +use ark_serialize::CanonicalDeserialize; +use frame_support::{BoundedVec, traits::Currency}; +use scale_info::prelude::collections::BTreeSet; +use sp_runtime::SaturatedConversion; use sp_runtime::{Saturating, traits::Zero}; -use sp_std::boxed::Box; +use sp_std::{boxed::Box, vec::Vec}; +use tle::{ + curves::drand::TinyBLS381, + stream_ciphers::AESGCMStreamCipherProvider, + tlock::{TLECiphertext, tld}, +}; +use w3f_bls::EngineBLS; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -31,7 +43,7 @@ pub mod pallet { // Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config + pallet_drand::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. 
type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -46,7 +58,7 @@ pub mod pallet { /// The maximum number of additional fields that can be added to a commitment #[pallet::constant] - type MaxFields: Get; + type MaxFields: Get + TypeInfo + 'static; /// The amount held on deposit for a registered identity #[pallet::constant] @@ -59,6 +71,15 @@ pub mod pallet { /// The rate limit for commitments #[pallet::constant] type DefaultRateLimit: Get>; + + /// Used to retrieve the given subnet's tempo + type TempoInterface: GetTempoInterface; + } + + /// Used to retrieve the given subnet's tempo + pub trait GetTempoInterface { + /// Used to retrieve the epoch index for the given subnet. + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64; + } #[pallet::event] @@ -71,6 +92,22 @@ pub mod pallet { /// The account who: T::AccountId, }, + /// A timelock-encrypted commitment was set + TimelockCommitment { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + /// The drand round to reveal + reveal_round: u64, + }, + /// A timelock-encrypted commitment was auto-revealed + CommitmentRevealed { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + }, } #[pallet::error] @@ -81,18 +118,28 @@ pub mod pallet { AccountNotAllowedCommit, /// Account is trying to commit data too fast, rate limit exceeded CommitmentSetRateLimitExceeded, + /// Space Limit Exceeded for the current interval + SpaceLimitExceeded, + /// Indicates that unreserve returned a leftover, which is unexpected. + UnexpectedUnreserveLeftover, } #[pallet::type_value] - /// Default value for commitment rate limit. + /// *DEPRECATED* Default value for commitment rate limit. 
pub fn DefaultRateLimit() -> BlockNumberFor { T::DefaultRateLimit::get() } - /// The rate limit for commitments + /// *DEPRECATED* The rate limit for commitments #[pallet::storage] pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; + /// Tracks all CommitmentOf that have at least one timelocked field. + #[pallet::storage] + #[pallet::getter(fn timelocked_index)] + pub type TimelockedIndex = + StorageValue<_, BTreeSet<(u16, T::AccountId)>, ValueQuery>; + /// Identity data by account #[pallet::storage] #[pallet::getter(fn commitment_of)] @@ -117,16 +164,44 @@ pub mod pallet { BlockNumberFor, OptionQuery, >; + #[pallet::storage] + #[pallet::getter(fn revealed_commitments)] + pub(super) type RevealedCommitments = StorageDoubleMap< + _, + Identity, + u16, + Twox64Concat, + T::AccountId, + RevealedData, T::MaxFields, BlockNumberFor>, + OptionQuery, + >; + + /// Maps (netuid, who) -> usage (how many “bytes” they've committed) + /// in the RateLimit window + #[pallet::storage] + #[pallet::getter(fn used_space_of)] + pub type UsedSpaceOf = + StorageDoubleMap<_, Identity, u16, Twox64Concat, T::AccountId, UsageTracker, OptionQuery>; + + #[pallet::type_value] + /// The default Maximum Space + pub fn DefaultMaxSpace() -> u32 { + 3100 + } + + #[pallet::storage] + #[pallet::getter(fn max_space_per_user_per_rate_limit)] + pub type MaxSpace = StorageValue<_, u32, ValueQuery, DefaultMaxSpace>; #[pallet::call] impl Pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - T::WeightInfo::set_commitment(), - DispatchClass::Operational, - Pays::No - ))] + ::WeightInfo::set_commitment(), + DispatchClass::Operational, + Pays::No + ))] pub fn set_commitment( origin: OriginFor, netuid: u16, @@ -145,28 +220,47 @@ pub mod pallet { ); let cur_block = >::block_number(); - if let Some(last_commit) = >::get(netuid, &who) { - ensure!( - cur_block >= last_commit.saturating_add(RateLimit::::get()), - 
Error::::CommitmentSetRateLimitExceeded - ); + + let required_space: u64 = info + .fields + .iter() + .map(|field| field.len_for_rate_limit()) + .sum(); + + let mut usage = UsedSpaceOf::::get(netuid, &who).unwrap_or_default(); + let cur_block_u64 = cur_block.saturated_into::(); + let current_epoch = T::TempoInterface::get_epoch_index(netuid, cur_block_u64); + + if usage.last_epoch != current_epoch { + usage.last_epoch = current_epoch; + usage.used_space = 0; } - let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); + let max_allowed = MaxSpace::::get() as u64; + ensure!( + usage.used_space.saturating_add(required_space) <= max_allowed, + Error::::SpaceLimitExceeded + ); + + usage.used_space = usage.used_space.saturating_add(required_space); + + UsedSpaceOf::::insert(netuid, &who, usage); + let mut id = match >::get(netuid, &who) { Some(mut id) => { - id.info = *info; + id.info = *info.clone(); id.block = cur_block; id } None => Registration { - info: *info, + info: *info.clone(), block: cur_block, deposit: Zero::zero(), }, }; let old_deposit = id.deposit; + let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); id.deposit = T::InitialDeposit::get().saturating_add(fd); if id.deposit > old_deposit { T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; @@ -174,12 +268,38 @@ pub mod pallet { if old_deposit > id.deposit { let err_amount = T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); - debug_assert!(err_amount.is_zero()); + if !err_amount.is_zero() { + return Err(Error::::UnexpectedUnreserveLeftover.into()); + } } >::insert(netuid, &who, id); >::insert(netuid, &who, cur_block); - Self::deposit_event(Event::Commitment { netuid, who }); + + if let Some(Data::TimelockEncrypted { reveal_round, .. }) = info + .fields + .iter() + .find(|data| matches!(data, Data::TimelockEncrypted { .. 
})) + { + Self::deposit_event(Event::TimelockCommitment { + netuid, + who: who.clone(), + reveal_round: *reveal_round, + }); + + TimelockedIndex::::mutate(|index| { + index.insert((netuid, who.clone())); + }); + } else { + Self::deposit_event(Event::Commitment { + netuid, + who: who.clone(), + }); + + TimelockedIndex::::mutate(|index| { + index.remove(&(netuid, who.clone())); + }); + } Ok(()) } @@ -187,7 +307,7 @@ pub mod pallet { /// Sudo-set the commitment rate limit #[pallet::call_index(1)] #[pallet::weight(( - T::WeightInfo::set_rate_limit(), + ::WeightInfo::set_rate_limit(), DispatchClass::Operational, Pays::No ))] @@ -196,6 +316,33 @@ pub mod pallet { RateLimit::::set(rate_limit_blocks.into()); Ok(()) } + + /// Sudo-set MaxSpace + #[pallet::call_index(2)] + #[pallet::weight(( + ::WeightInfo::set_rate_limit(), + DispatchClass::Operational, + Pays::No + ))] + pub fn set_max_space(origin: OriginFor, new_limit: u32) -> DispatchResult { + ensure_root(origin)?; + MaxSpace::::set(new_limit); + Ok(()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + if let Err(e) = Self::reveal_timelocked_commitments() { + log::debug!( + "Failed to unveil matured commitments on block {:?}: {:?}", + n, + e + ); + } + Weight::from_parts(0, 0) + } } } @@ -328,3 +475,177 @@ where Ok(()) } } + +impl Pallet { + pub fn reveal_timelocked_commitments() -> DispatchResult { + let current_block = >::block_number(); + let index = TimelockedIndex::::get(); + for (netuid, who) in index.clone() { + let Some(mut registration) = >::get(netuid, &who) else { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + continue; + }; + + let original_fields = registration.info.fields.clone(); + let mut remain_fields = Vec::new(); + let mut revealed_fields = Vec::new(); + + for data in original_fields { + match data { + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let pulse = match 
pallet_drand::Pulses::::get(reveal_round) { + Some(p) => p, + None => { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + let sig_reader = &mut &signature_bytes[..]; + let sig = + ::SignatureGroup::deserialize_compressed( + sig_reader, + ) + .map_err(|e| { + log::warn!( + "Failed to deserialize drand signature for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(sig) = sig else { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + }; + + let reader = &mut &encrypted[..]; + let commit = TLECiphertext::::deserialize_compressed(reader) + .map_err(|e| { + log::warn!( + "Failed to deserialize TLECiphertext for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(commit) = commit else { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + }; + + let decrypted_bytes: Vec = + tld::(commit, sig) + .map_err(|e| { + log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) + }) + .ok() + .unwrap_or_default(); + + if decrypted_bytes.is_empty() { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + + let mut reader = &decrypted_bytes[..]; + let revealed_info: CommitmentInfo = + match Decode::decode(&mut reader) { + Ok(info) => info, + Err(e) => { + log::warn!( + "Failed to decode decrypted data for {:?}: {:?}", + who, + e + ); + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + }; + + revealed_fields.push(revealed_info); + } + + other => remain_fields.push(other), + } + } + + if !revealed_fields.is_empty() { + let mut all_revealed_data = Vec::new(); + for info in revealed_fields { + all_revealed_data.extend(info.fields.into_inner()); + } + + let bounded_revealed = BoundedVec::try_from(all_revealed_data) + .map_err(|_| "Could not 
build BoundedVec for revealed fields")?; + + let combined_revealed_info = CommitmentInfo { + fields: bounded_revealed, + }; + + let revealed_data = RevealedData { + info: combined_revealed_info, + revealed_block: current_block, + deposit: registration.deposit, + }; + >::insert(netuid, &who, revealed_data); + Self::deposit_event(Event::CommitmentRevealed { + netuid, + who: who.clone(), + }); + } + + registration.info.fields = BoundedVec::try_from(remain_fields) + .map_err(|_| "Failed to build BoundedVec for remain_fields")?; + + match registration.info.fields.is_empty() { + true => { + >::remove(netuid, &who); + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + false => { + >::insert(netuid, &who, ®istration); + let has_timelock = registration + .info + .fields + .iter() + .any(|f| matches!(f, Data::TimelockEncrypted { .. })); + if !has_timelock { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + } + } + } + + Ok(()) + } +} diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 8866e1c0d5..cc2482ff88 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -1,22 +1,33 @@ use crate as pallet_commitments; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + pallet_prelude::{Get, TypeInfo}, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, BuildStorage, + testing::Header, + traits::{BlakeTwo256, ConstU16, IdentityLookup}, }; -type Block = frame_system::mocking::MockBlock; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; -// Configure a mock runtime to test the pallet. 
frame_support::construct_runtime!( pub enum Test { System: frame_system = 1, - Commitments: pallet_commitments = 2, + Balances: pallet_balances = 2, + Commitments: pallet_commitments = 3, + Drand: pallet_drand = 4, } ); +pub type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -24,36 +35,238 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = ConstU16<42>; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; + type Block = Block; + type Nonce = u32; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct TestMaxFields; +impl Get for TestMaxFields { + fn get() -> u32 { + 16 + } +} +impl TypeInfo for TestMaxFields { + type Identity = Self; + fn type_info() -> scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("TestMaxFields", module_path!())) + .composite(scale_info::build::Fields::unit()) + } 
+} + +pub struct TestCanCommit; +impl pallet_commitments::CanCommit for TestCanCommit { + fn can_commit(_netuid: u16, _who: &u64) -> bool { + true + } } impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; + type Currency = Balances; type WeightInfo = (); - type MaxAdditionalFields = frame_support::traits::ConstU32<16>; - type CanRegisterIdentity = (); + type MaxFields = TestMaxFields; + type CanCommit = TestCanCommit; + type FieldDeposit = ConstU64<0>; + type InitialDeposit = ConstU64<0>; + type DefaultRateLimit = ConstU64<0>; + type TempoInterface = MockTempoInterface; +} + +pub struct MockTempoInterface; +impl pallet_commitments::GetTempoInterface for MockTempoInterface { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + let tempo = 360; // TODO: configure SubtensorModule in this mock + let tempo_plus_one: u64 = tempo.saturating_add(1); + let netuid_plus_one: u64 = (netuid as u64).saturating_add(1); + let block_with_offset: u64 = cur_block.saturating_add(netuid_plus_one); + + block_with_offset.checked_div(tempo_plus_one).unwrap_or(0) + } +} + +impl pallet_drand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_drand::weights::SubstrateWeight; + type AuthorityId = test_crypto::TestAuthId; + type Verifier = pallet_drand::verifier::QuicknetVerifier; + type UnsignedPriority = ConstU64<{ 1 << 20 }>; + type HttpFetchTimeout = ConstU64<1_000>; +} + +pub mod test_crypto { + use sp_core::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::IdentifyAccount, + }; + + pub const KEY_TYPE: sp_runtime::KeyTypeId = sp_runtime::KeyTypeId(*b"test"); + + app_crypto!(sr25519, KEY_TYPE); + + pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = Sr25519Signature; + type GenericPublic = Sr25519Public; + } + + impl IdentifyAccount for Public { + type 
AccountId = u64; + + fn into_account(self) -> u64 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(self.as_ref()); + u64::from_le_bytes(bytes[..8].try_into().expect("Expected to not panic")) + } + } +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = test_crypto::Public; + type Signature = test_crypto::Signature; +} + +impl frame_system::offchain::CreateSignedTransaction> for Test { + fn create_transaction>( + call: RuntimeCall, + _public: Self::Public, + account: Self::AccountId, + _nonce: u32, + ) -> Option<( + RuntimeCall, + ::SignaturePayload, + )> { + // Create a dummy sr25519 signature from a raw byte array + let dummy_raw = [0u8; 64]; + let dummy_signature = sp_core::sr25519::Signature::from(dummy_raw); + let signature = test_crypto::Signature::from(dummy_signature); + Some((call, (account, signature, ()))) + } +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; } -// Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::::default() + let t = frame_system::GenesisConfig::::default() .build_storage() - .unwrap() - .into() + .expect("Expected to not panic"); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +use super::*; +use crate::{EngineBLS, MAX_TIMELOCK_COMMITMENT_SIZE_BYTES, TinyBLS381}; +use ark_serialize::CanonicalSerialize; +use frame_support::BoundedVec; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use tle::{ibe::fullident::Identity, stream_ciphers::AESGCMStreamCipherProvider, tlock::tle}; + +// Drand Quicknet public key and signature for round=1000: +pub const DRAND_QUICKNET_PUBKEY_HEX: &str = "83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6\ + a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809b\ + d274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a"; +pub const DRAND_QUICKNET_SIG_HEX: &str = "b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39"; + +/// Inserts a Drand pulse for `round` with the given `signature_bytes`. +pub fn insert_drand_pulse(round: u64, signature_bytes: &[u8]) { + let sig_bounded: BoundedVec> = signature_bytes + .to_vec() + .try_into() + .expect("Signature within 144 bytes"); + + let randomness_bounded: BoundedVec> = vec![0u8; 32] + .try_into() + .expect("Randomness must be exactly 32 bytes"); + + pallet_drand::Pulses::::insert( + round, + pallet_drand::types::Pulse { + round, + randomness: randomness_bounded, + signature: sig_bounded, + }, + ); +} + +/// Produces a **real** ciphertext by TLE-encrypting `plaintext` for Drand Quicknet `round`. +/// +/// The returned `BoundedVec>` +/// will decrypt if you pass in the valid signature for the same round. 
+pub fn produce_ciphertext( + plaintext: &[u8], + round: u64, +) -> BoundedVec> { + // 1) Deserialize the known Drand Quicknet public key: + let pub_key_bytes = hex::decode(DRAND_QUICKNET_PUBKEY_HEX).expect("decode pubkey"); + let pub_key = + ::PublicKeyGroup::deserialize_compressed(&pub_key_bytes[..]) + .expect("bad pubkey bytes"); + + // 2) Prepare the identity for that round + // by hashing round.to_be_bytes() with SHA256: + let msg = { + let mut hasher = sha2::Sha256::new(); + hasher.update(round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![msg]); + + // 3) Actually encrypt + // (just an example ephemeral secret key & RNG seed) + let esk = [2u8; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + + let ct = tle::( + pub_key, esk, plaintext, identity, rng, + ) + .expect("Encryption failed in produce_real_ciphertext"); + + // 4) Serialize the ciphertext to BoundedVec + let mut ct_bytes = Vec::new(); + ct.serialize_compressed(&mut ct_bytes) + .expect("serialize TLECiphertext"); + + ct_bytes.try_into().expect("Ciphertext is within max size") } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 15675d8ad8..f03e99080e 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1,100 +1,1264 @@ -#![allow(non_camel_case_types)] - -use crate as pallet_commitments; -use frame_support::derive_impl; -use frame_support::traits::ConstU64; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, ConstU16, IdentityLookup}, +use codec::Encode; +use sp_std::prelude::*; + +#[cfg(test)] +use crate::{ + CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, + Registration, RevealedCommitments, TimelockedIndex, + mock::{ + Balances, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, + new_test_ext, produce_ciphertext, + }, +}; +use frame_support::pallet_prelude::Hooks; +use frame_support::{ + 
BoundedVec, assert_noop, assert_ok, + traits::{Currency, Get, ReservableCurrency}, }; +use frame_system::Pallet as System; + +#[allow(clippy::indexing_slicing)] +#[test] +fn manual_data_type_info() { + let mut registry = scale_info::Registry::new(); + let type_id = registry.register_type(&scale_info::meta_type::()); + let registry: scale_info::PortableRegistry = registry.into(); + let type_info = registry.resolve(type_id.id).expect("Expected not to panic"); + + let check_type_info = |data: &Data| { + let variant_name = match data { + Data::None => "None".to_string(), + Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), + Data::Sha256(_) => "Sha256".to_string(), + Data::Keccak256(_) => "Keccak256".to_string(), + Data::ShaThree256(_) => "ShaThree256".to_string(), + Data::Raw(bytes) => format!("Raw{}", bytes.len()), + Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), + }; + if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { + let variant = variant + .variants + .iter() + .find(|v| v.name == variant_name) + .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); + + let encoded = data.encode(); + assert_eq!(encoded[0], variant.index); + + // For variants with fields, check the encoded length matches expected field lengths + if !variant.fields.is_empty() { + let expected_len = match data { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u32, + Data::BlakeTwo256(_) + | Data::Sha256(_) + | Data::Keccak256(_) + | Data::ShaThree256(_) => 32, + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + // Calculate length: encrypted (length prefixed) + reveal_round (u64) + let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix + let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes + encrypted_len + reveal_round_len + } + }; + assert_eq!( + encoded.len() as u32 - 1, // Subtract variant byte + expected_len, + "Encoded length mismatch for variant {}", + 
variant_name + ); + } else { + assert_eq!( + encoded.len() as u32 - 1, + 0, + "Expected no fields for {}", + variant_name + ); + } + } else { + panic!("Should be a variant type"); + } + }; -pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + let mut data = vec![ + Data::None, + Data::BlakeTwo256(Default::default()), + Data::Sha256(Default::default()), + Data::Keccak256(Default::default()), + Data::ShaThree256(Default::default()), + ]; -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system = 1, - Balances: pallet_balances = 2, - Commitments: pallet_commitments = 3, + // Add Raw instances for all possible sizes + for n in 0..128 { + data.push(Data::Raw( + vec![0u8; n as usize] + .try_into() + .expect("Expected not to panic"), + )); } -); - -#[allow(dead_code)] -pub type AccountId = u64; - -// The address format for describing accounts. -#[allow(dead_code)] -pub type Address = AccountId; - -// Balance of an account. -#[allow(dead_code)] -pub type Balance = u64; - -// An index to a block. 
-#[allow(dead_code)] -pub type BlockNumber = u64; - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - type Block = Block; - type Nonce = u64; -} - -impl pallet_commitments::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = (); - type MaxFields = frame_support::traits::ConstU32<16>; - type CanCommit = (); - type FieldDeposit = frame_support::traits::ConstU64<0>; - type InitialDeposit = frame_support::traits::ConstU64<0>; - type DefaultRateLimit = frame_support::traits::ConstU64<0>; -} - -// // Build genesis storage according to the mock runtime. 
-// pub fn new_test_ext() -> sp_io::TestExternalities { -// let t = frame_system::GenesisConfig::::default() -// .build_storage() -// .unwrap(); -// let mut ext = sp_io::TestExternalities::new(t); -// ext.execute_with(|| System::set_block_number(1)); -// ext + + // Add a TimelockEncrypted instance + data.push(Data::TimelockEncrypted { + encrypted: vec![0u8; 64].try_into().expect("Expected not to panic"), + reveal_round: 12345, + }); + + for d in data.iter() { + check_type_info(d); + } +} + +#[test] +fn set_commitment_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + let commitment = Pallet::::commitment_of(1, 1).expect("Expected not to panic"); + let initial_deposit: u64 = ::InitialDeposit::get(); + assert_eq!(commitment.deposit, initial_deposit); + assert_eq!(commitment.block, 1); + assert_eq!(Pallet::::last_commitment(1, 1), Some(1)); + }); +} + +#[test] +#[should_panic(expected = "BoundedVec::try_from failed")] +fn set_commitment_too_many_fields_panics() { + new_test_ext().execute_with(|| { + let max_fields: u32 = ::MaxFields::get(); + let fields = vec![Data::None; (max_fields + 1) as usize]; + + // This line will panic when 'BoundedVec::try_from(...)' sees too many items. + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), + }); + + // We never get here, because the constructor panics above. 
+ let _ = Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + }); +} + +// DEPRECATED +// #[test] +// fn set_commitment_rate_limit_exceeded() { +// new_test_ext().execute_with(|| { +// let rate_limit = ::DefaultRateLimit::get(); +// System::::set_block_number(1); +// let info = Box::new(CommitmentInfo { +// fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), +// }); + +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info.clone() +// )); + +// // Set block number to just before rate limit expires +// System::::set_block_number(rate_limit); +// assert_noop!( +// Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), +// Error::::CommitmentSetRateLimitExceeded +// ); + +// // Set block number to after rate limit +// System::::set_block_number(rate_limit + 1); +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info +// )); +// }); // } + +#[test] +fn set_commitment_updates_deposit() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info1 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 2]) + .expect("Expected not to panic"), + }); + let info2 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 3]) + .expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info1 + )); + let initial_deposit: u64 = ::InitialDeposit::get(); + let field_deposit: u64 = ::FieldDeposit::get(); + let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + .deposit, + expected_deposit1 + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info2 + )); + let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + 
.deposit, + expected_deposit2 + ); + }); +} + +#[test] +fn set_rate_limit_works() { + new_test_ext().execute_with(|| { + let default_rate_limit: u64 = ::DefaultRateLimit::get(); + assert_eq!(RateLimit::::get(), default_rate_limit); + + assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); + assert_eq!(RateLimit::::get(), 200); + + assert_noop!( + Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), + sp_runtime::DispatchError::BadOrigin + ); + }); +} + +#[test] +fn event_emission_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + + let events = System::::events(); + assert!(events.iter().any(|e| matches!( + &e.event, + RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) + ))); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn happy_path_timelock_commitments() { + new_test_ext().execute_with(|| { + let message_text = b"Hello timelock only!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes for Raw variant"), + ); + let fields_vec = vec![data_raw]; + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(fields_vec).expect("Too many fields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let data = Data::TimelockEncrypted { + encrypted: encrypted.clone(), + reveal_round, + }; + + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Too many fields"); + let info_outer = CommitmentInfo { + fields: fields_outer, + }; + + let who = 123; + let netuid = 42; + System::::set_block_number(1); + + assert_ok!(Pallet::::set_commitment( 
+ RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = + hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Should have revealed data"); + + let revealed_inner = &revealed.info; + assert_eq!(revealed_inner.fields.len(), 1); + match &revealed_inner.fields[0] { + Data::Raw(bounded_bytes) => { + assert_eq!( + bounded_bytes.as_slice(), + message_text, + "Decrypted text from on-chain storage must match the original message" + ); + } + other => panic!("Expected Data::Raw(...) in revealed, got {:?}", other), + } + }); +} + +#[test] +fn reveal_timelocked_commitment_missing_round_does_nothing() { + new_test_ext().execute_with(|| { + let who = 1; + let netuid = 2; + System::::set_block_number(5); + let ciphertext = produce_ciphertext(b"My plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + System::::set_block_number(100_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 9; + System::::set_block_number(10); + let good_ct = produce_ciphertext(b"Some data", 1000); + let mut corrupted = good_ct.into_inner(); + if !corrupted.is_empty() { + corrupted[0] = 0xFF; + } + let corrupted_ct = 
BoundedVec::try_from(corrupted).expect("Expected not to panic"); + let data = Data::TimelockEncrypted { + encrypted: corrupted_ct, + reveal_round: 1000, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(1000, &sig_bytes); + System::::set_block_number(99999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_bad_signature_skips_decryption() { + new_test_ext().execute_with(|| { + let who = 10; + let netuid = 11; + System::::set_block_number(15); + let real_ct = produce_ciphertext(b"A valid plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let bad_signature = [0x33u8; 10]; + insert_drand_pulse(1000, &bad_signature); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { + new_test_ext().execute_with(|| { + let who = 2; + let netuid = 3; + let commit_block = 100u64; + System::::set_block_number(commit_block); + let reveal_round = 1000; + let empty_ct = produce_ciphertext(&[], reveal_round); + let data = Data::TimelockEncrypted { + encrypted: empty_ct, + reveal_round, + }; + let fields = 
BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_decode_failure_is_skipped() { + new_test_ext().execute_with(|| { + let who = 999; + let netuid = 8; + let commit_block = 42u64; + System::::set_block_number(commit_block); + let plaintext = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE]; + let reveal_round = 1000; + let real_ct = produce_ciphertext(&plaintext, reveal_round); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = + hex::decode(DRAND_QUICKNET_SIG_HEX.as_bytes()).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_single_field_entry_is_removed_after_reveal() { + new_test_ext().execute_with(|| { + let message_text = b"Single field timelock test!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("Message must be <=128 bytes for Raw variant"), + ); + + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data_raw]).expect("BoundedVec creation must not fail"); + + let inner_info: 
CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let timelock_data = Data::TimelockEncrypted { + encrypted, + reveal_round, + }; + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![timelock_data]).expect("Too many fields"); + let info_outer: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_outer, + }; + + let who = 555; + let netuid = 777; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Must decode DRAND_QUICKNET_SIG_HEX successfully"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Expected to find revealed data"); + assert_eq!( + revealed.info.fields.len(), + 1, + "Should have exactly 1 revealed field" + ); + + assert!( + crate::CommitmentOf::::get(netuid, who).is_none(), + "Expected CommitmentOf entry to be removed after reveal" + ); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { + new_test_ext().execute_with(|| { + let round_1000 = 1000; + + // 2) Build two CommitmentInfos, one for each timelock + let msg_1 = b"Hello from TLE #1"; + let inner_1_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_1.to_vec().try_into().expect("expected not to panic"), + )]) + .expect("BoundedVec of size 1"); + let inner_info_1 = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let timelock_1 = Data::TimelockEncrypted { + encrypted: 
ciphertext_1, + reveal_round: round_1000, + }; + + let msg_2 = b"Hello from TLE #2"; + let inner_2_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_2.to_vec().try_into().expect("expected not to panic"), + )]) + .expect("BoundedVec of size 1"); + let inner_info_2 = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let ciphertext_2 = produce_ciphertext(&encoded_2, round_1000); + let timelock_2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_1000, + }; + + // 3) One plain Data::Raw field (non-timelocked) + let raw_bytes = b"Plain non-timelocked data"; + let data_raw = Data::Raw( + raw_bytes + .to_vec() + .try_into() + .expect("expected not to panic"), + ); + + // 4) Outer commitment: 3 fields total => [Raw, TLE #1, TLE #2] + let outer_fields = BoundedVec::try_from(vec![ + data_raw.clone(), + timelock_1.clone(), + timelock_2.clone(), + ]) + .expect("T::MaxFields >= 3 in the test config, or at least 3 here"); + let outer_info = CommitmentInfo { + fields: outer_fields, + }; + + // 5) Insert the commitment + let who = 123; + let netuid = 999; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(outer_info) + )); + let initial = Pallet::::commitment_of(netuid, who).expect("Must exist"); + assert_eq!(initial.info.fields.len(), 3, "3 fields inserted"); + + // 6) Insert Drand signature for round=1000 + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode DRAND sig"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + // 7) Reveal once + System::::set_block_number(50); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + // => The pallet code has removed *both* TLE #1 and TLE #2 in this single call! 
+ let after_reveal = Pallet::::commitment_of(netuid, who) + .expect("Should still exist with leftover fields"); + // Only the raw, non-timelocked field remains + assert_eq!( + after_reveal.info.fields.len(), + 1, + "Both timelocks referencing round=1000 got removed at once" + ); + assert_eq!( + after_reveal.info.fields[0], data_raw, + "Only the raw field is left" + ); + + // 8) Check revealed data + let revealed_data = RevealedCommitments::::get(netuid, who) + .expect("Expected revealed data for TLE #1 and #2"); + + assert_eq!( + revealed_data.info.fields.len(), + 2, + "We revealed both TLE #1 and TLE #2 in the same pass" + ); + let mut found_msg1 = false; + let mut found_msg2 = false; + for item in &revealed_data.info.fields { + if let Data::Raw(bytes) = item { + if bytes.as_slice() == msg_1 { + found_msg1 = true; + } else if bytes.as_slice() == msg_2 { + found_msg2 = true; + } + } + } + assert!( + found_msg1 && found_msg2, + "Should see both TLE #1 and TLE #2 in the revealed data" + ); + + // 9) A second reveal call now does nothing, because no timelocks remain + System::::set_block_number(51); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let after_second = Pallet::::commitment_of(netuid, who).expect("Still must exist"); + assert_eq!( + after_second.info.fields.len(), + 1, + "No new fields were removed, because no timelocks remain" + ); + }); +} + +#[test] +fn test_index_lifecycle_no_timelocks_updates_in_out() { + new_test_ext().execute_with(|| { + let netuid = 100; + let who = 999; + + // + // A) Create a commitment with **no** timelocks => shouldn't be in index + // + let no_tl_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![]).expect("Empty is ok"); + let info_no_tl = CommitmentInfo { + fields: no_tl_fields, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_no_tl) + )); + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User with no timelocks must not appear in 
index" + ); + + // + // B) Update the commitment to have a timelock => enters index + // + let tl_fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted: Default::default(), + reveal_round: 1234, + }]) + .expect("Expected success"); + let info_with_tl = CommitmentInfo { fields: tl_fields }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_with_tl) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "User must appear in index after adding a timelock" + ); + + // + // C) Remove the timelock => leaves index + // + let back_to_no_tl: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![]).expect("Expected success"); + let info_remove_tl = CommitmentInfo { + fields: back_to_no_tl, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_remove_tl) + )); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User must be removed from index after losing all timelocks" + ); + }); +} + +#[test] +fn two_timelocks_partial_then_full_reveal() { + new_test_ext().execute_with(|| { + let netuid_a = 1; + let who_a = 10; + let round_1000 = 1000; + let round_2000 = 2000; + + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected success"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + + // + // First Timelock => round=1000 + // + let msg_a1 = b"UserA timelock #1 (round=1000)"; + let inner_1_fields: BoundedVec::MaxFields> = BoundedVec::try_from( + vec![Data::Raw(msg_a1.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_1: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let 
tle_a1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + // + // Second Timelock => round=2000 + // + let msg_a2 = b"UserA timelock #2 (round=2000)"; + let inner_2_fields: BoundedVec::MaxFields> = BoundedVec::try_from( + vec![Data::Raw(msg_a2.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_2: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let ciphertext_2 = produce_ciphertext(&encoded_2, round_2000); + let tle_a2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_2000, + }; + + // + // Insert outer commitment with both timelocks + // + let fields_a: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_a1, tle_a2]).expect("2 fields, must be <= MaxFields"); + let info_a: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_a }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_a), + netuid_a, + Box::new(info_a) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A must be in index with 2 timelocks" + ); + + System::::set_block_number(10); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_a1 = CommitmentOf::::get(netuid_a, who_a).expect("still there"); + assert_eq!( + leftover_a1.info.fields.len(), + 1, + "Only the round=1000 timelock removed; round=2000 remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "Still in index with leftover timelock" + ); + + // + // Insert signature for round=2000 => final reveal => leftover=none => removed + // + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(11); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_a2 = CommitmentOf::::get(netuid_a, who_a); + assert!( + leftover_a2.is_none(), + 
"All timelocks removed => none leftover" + ); + assert!( + !TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A removed from index after final reveal" + ); + }); +} + +#[test] +fn single_timelock_reveal_later_round() { + new_test_ext().execute_with(|| { + let netuid_b = 2; + let who_b = 20; + let round_2000 = 2000; + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + let msg_b = b"UserB single timelock (round=2000)"; + + let inner_b_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw(msg_b.to_vec().try_into().expect("Expected success"))]) + .expect("MaxFields >= 1"); + let inner_info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_b_fields, + }; + let encoded_b = inner_info_b.encode(); + let ciphertext_b = produce_ciphertext(&encoded_b, round_2000); + let tle_b = Data::TimelockEncrypted { + encrypted: ciphertext_b, + reveal_round: round_2000, + }; + + let fields_b: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_b]).expect("1 field"); + let info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_b }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_b), + netuid_b, + Box::new(info_b) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B in index" + ); + + // Remove the round=2000 signature so first reveal does nothing + pallet_drand::Pulses::::remove(round_2000); + + System::::set_block_number(20); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_b1 = CommitmentOf::::get(netuid_b, who_b).expect("still there"); + assert_eq!( + leftover_b1.info.fields.len(), + 1, + "No signature => timelock remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "Still in index with leftover 
timelock" + ); + + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(21); + assert_ok!(Pallet::::reveal_timelocked_commitments()); + + let leftover_b2 = CommitmentOf::::get(netuid_b, who_b); + assert!(leftover_b2.is_none(), "Timelock removed => leftover=none"); + assert!( + !TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B removed from index after final reveal" + ); + }); +} + +#[test] +fn tempo_based_space_limit_accumulates_in_same_window() { + new_test_ext().execute_with(|| { + let netuid = 1; + let who = 100; + let space_limit = 50; + MaxSpace::::set(space_limit); + System::::set_block_number(0); + + // A single commitment that uses some space, e.g. 30 bytes: + let data = vec![0u8; 30]; + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + data.try_into().expect("Data up to 128 bytes OK"), + )]) + .expect("1 field is <= MaxFields"), + }); + + // 2) First call => usage=0 => usage=30 after. OK. + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info.clone(), + )); + + // 3) Second call => tries another 30 bytes in the SAME block => total=60 => exceeds 50 => should fail. 
+ assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, info.clone()), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_resets_after_tempo() { + new_test_ext().execute_with(|| { + let netuid = 2; + let who = 101; + + MaxSpace::::set(40); + System::::set_block_number(1); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(200); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(360); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small + )); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_netuid() { + new_test_ext().execute_with(|| { + let netuid_a = 10; + let netuid_b = 20; + let who = 111; + let space_limit = 50; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_a, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + 
netuid_a, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_b, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid_b, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_user() { + new_test_ext().execute_with(|| { + let netuid = 10; + let user1 = 123; + let user2 = 456; + let space_limit = 50; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user2), + netuid, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(user2), netuid, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_sudo_set_max_space() { + new_test_ext().execute_with(|| { + let netuid = 3; + let who = 15; + MaxSpace::::set(30); + + System::::set_block_number(1); + let commit_25 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 25].try_into().expect("expected ok"), + )]) + .expect("expected ok"), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_25.clone() + )); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, commit_25.clone()), + 
Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_max_space(RuntimeOrigin::root(), 100)); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_25 + )); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn on_initialize_reveals_matured_timelocks() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 7; + let reveal_round = 1000; + + let message_text = b"Timelock test via on_initialize"; + + let inner_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes is OK for Data::Raw"), + )]) + .expect("Should not exceed MaxFields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_fields, + }; + + let plaintext = inner_info.encode(); + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let outer_fields = BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted, + reveal_round, + }]) + .expect("One field is well under MaxFields"); + let info_outer = CommitmentInfo { + fields: outer_fields, + }; + + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + assert!(CommitmentOf::::get(netuid, who).is_some()); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "Should appear in TimelockedIndex since it contains a timelock" + ); + + let drand_sig_hex = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Decoding DRAND_QUICKNET_SIG_HEX must not fail"); + insert_drand_pulse(reveal_round, &drand_sig_hex); + + assert!(RevealedCommitments::::get(netuid, who).is_none()); + + System::::set_block_number(2); + as Hooks>::on_initialize(2); + + let revealed_opt = RevealedCommitments::::get(netuid, who); + assert!( + revealed_opt.is_some(), + "Expected that the timelock got revealed at block #2" + ); + + let leftover = CommitmentOf::::get(netuid, who); + assert!( + leftover.is_none(), + "After 
revealing the only timelock, the entire commitment is removed." + ); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "No longer in TimelockedIndex after reveal." + ); + + let revealed_data = revealed_opt.expect("expected to not panic"); + assert_eq!(revealed_data.info.fields.len(), 1); + if let Data::Raw(bound_bytes) = &revealed_data.info.fields[0] { + assert_eq!(bound_bytes.as_slice(), message_text); + } else { + panic!("Expected a Data::Raw variant in revealed data."); + } + }); +} + +#[test] +fn set_commitment_unreserve_leftover_fails() { + new_test_ext().execute_with(|| { + use frame_system::RawOrigin; + + let netuid = 999; + let who = 99; + + Balances::make_free_balance_be(&who, 10_000); + + let fake_deposit = 100; + let dummy_info = CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("empty fields is fine"), + }; + let registration = Registration { + deposit: fake_deposit, + info: dummy_info, + block: 0u64.into(), + }; + + CommitmentOf::::insert(netuid, who, registration); + + assert_ok!(Balances::reserve(&who, fake_deposit)); + assert_eq!(Balances::reserved_balance(who), 100); + + Balances::unreserve(&who, 10_000); + assert_eq!(Balances::reserved_balance(who), 0); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("no fields is fine"), + }); + + assert_noop!( + Pallet::::set_commitment(RawOrigin::Signed(who).into(), netuid, commit_small), + Error::::UnexpectedUnreserveLeftover + ); + }); +} diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index bc0531ece4..0f1d2302a5 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -53,12 +53,34 @@ pub enum Data { /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. ShaThree256([u8; 32]), + /// A timelock-encrypted commitment with a reveal round. 
+ TimelockEncrypted { + encrypted: BoundedVec>, + reveal_round: u64, + }, } impl Data { pub fn is_none(&self) -> bool { self == &Data::None } + + /// Check if this is a timelock-encrypted commitment. + pub fn is_timelock_encrypted(&self) -> bool { + matches!(self, Data::TimelockEncrypted { .. }) + } + + pub fn len_for_rate_limit(&self) -> u64 { + match self { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u64, + Data::BlakeTwo256(arr) + | Data::Sha256(arr) + | Data::Keccak256(arr) + | Data::ShaThree256(arr) => arr.len() as u64, + Data::TimelockEncrypted { encrypted, .. } => encrypted.len() as u64, + } + } } impl Decode for Data { @@ -77,6 +99,15 @@ impl Decode for Data { 131 => Data::Sha256(<[u8; 32]>::decode(input)?), 132 => Data::Keccak256(<[u8; 32]>::decode(input)?), 133 => Data::ShaThree256(<[u8; 32]>::decode(input)?), + 134 => { + let encrypted = + BoundedVec::>::decode(input)?; + let reveal_round = u64::decode(input)?; + Data::TimelockEncrypted { + encrypted, + reveal_round, + } + } _ => return Err(codec::Error::from("invalid leading byte")), }) } @@ -96,6 +127,15 @@ impl Encode for Data { Data::Sha256(h) => once(131).chain(h.iter().cloned()).collect(), Data::Keccak256(h) => once(132).chain(h.iter().cloned()).collect(), Data::ShaThree256(h) => once(133).chain(h.iter().cloned()).collect(), + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let mut r = vec![134]; + r.extend_from_slice(&encrypted.encode()); + r.extend_from_slice(&reveal_round.encode()); + r + } } } } @@ -270,6 +310,17 @@ impl TypeInfo for Data { .variant("ShaThree256", |v| { v.index(133) .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("TimelockEncrypted", |v| { + v.index(134).fields( + Fields::named() + .field(|f| { + f.name("encrypted") + .ty::>>( + ) + }) + .field(|f| f.name("reveal_round").ty::()), + ) }); Type::builder() @@ -295,6 +346,28 @@ pub struct CommitmentInfo> { pub fields: BoundedVec, } +/// Maximum size of the serialized timelock 
commitment in bytes +pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; + +/// Contains the decrypted data of a revealed commitment. +#[freeze_struct("bf575857b57f9bef")] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] +pub struct RevealedData, BlockNumber> { + pub info: CommitmentInfo, + pub revealed_block: BlockNumber, + pub deposit: Balance, +} + +/// Tracks how much “space” each (netuid, who) has used within the current RateLimit block-window. +#[freeze_struct("1f23fb50f96326e4")] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, TypeInfo)] +pub struct UsageTracker { + /// Last epoch block + pub last_epoch: u64, + /// Space used + pub used_space: u64, +} + /// Information concerning the identity of the controller of an account. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a @@ -345,71 +418,3 @@ impl< }) } } - -#[cfg(test)] -#[allow(clippy::indexing_slicing, clippy::unwrap_used)] -mod tests { - use super::*; - - #[test] - fn manual_data_type_info() { - let mut registry = scale_info::Registry::new(); - let type_id = registry.register_type(&scale_info::meta_type::()); - let registry: scale_info::PortableRegistry = registry.into(); - let type_info = registry.resolve(type_id.id).unwrap(); - - let check_type_info = |data: &Data| { - let variant_name = match data { - Data::None => "None".to_string(), - Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), - Data::Sha256(_) => "Sha256".to_string(), - Data::Keccak256(_) => "Keccak256".to_string(), - Data::ShaThree256(_) => "ShaThree256".to_string(), - Data::Raw(bytes) => format!("Raw{}", bytes.len()), - }; - if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { - let variant = variant - .variants - .iter() - .find(|v| v.name == variant_name) - .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); - - let field_arr_len = variant - .fields - .first() - .and_then(|f| registry.resolve(f.ty.id)) - .map(|ty| { - 
if let scale_info::TypeDef::Array(arr) = &ty.type_def { - arr.len - } else { - panic!("Should be an array type") - } - }) - .unwrap_or(0); - - let encoded = data.encode(); - assert_eq!(encoded[0], variant.index); - assert_eq!(encoded.len() as u32 - 1, field_arr_len); - } else { - panic!("Should be a variant type") - }; - }; - - let mut data = vec![ - Data::None, - Data::BlakeTwo256(Default::default()), - Data::Sha256(Default::default()), - Data::Keccak256(Default::default()), - Data::ShaThree256(Default::default()), - ]; - - // A Raw instance for all possible sizes of the Raw data - for n in 0..128 { - data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())) - } - - for d in data.iter() { - check_type_info(d); - } - } -} diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 30d1f39e11..8d4457b0c9 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -6,7 +6,7 @@ use crate::Pallet as Subtensor; use crate::*; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; -use frame_system::RawOrigin; +use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; pub use pallet::*; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Hash}; @@ -594,5 +594,99 @@ batch_reveal_weights { version_keys ) +benchmark_recycle_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + 
Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: recycle_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + +benchmark_burn_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid = 1; + let tempo = 1; + let seed = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + + +benchmark_start_call { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and 
hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + let current_block: u64 = Subtensor::::get_current_block_as_u64(); + let duration = ::DurationOfStartCall::get(); + let block: BlockNumberFor = (current_block + duration).try_into().ok().expect("can't convert to block number"); + frame_system::Pallet::::set_block_number(block); + +}: start_call(RawOrigin::Signed(coldkey), netuid) } diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7836423868..1ff8b2760d 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -37,16 +37,24 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::debug!("Current block: {:?}", current_block); - // --- 1. Get all netuids (filter out root.) + // --- 1. Get all netuids (filter out root) let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) .collect(); log::debug!("All subnet netuids: {:?}", subnets); + // Filter out subnets with no first emission block number. + let subnets_to_emit_to: Vec = subnets + .clone() + .into_iter() + .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) + .collect(); + log::debug!("Subnets to emit to: {:?}", subnets_to_emit_to); // --- 2. 
Get sum of tao reserves ( in a later version we will switch to prices. ) let mut total_moving_prices: I96F32 = I96F32::saturating_from_num(0.0); - for netuid_i in subnets.iter() { + // Only get price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Get and update the moving price of each subnet adding the total together. total_moving_prices = total_moving_prices.saturating_add(Self::get_moving_alpha_price(*netuid_i)); @@ -58,7 +66,8 @@ impl Pallet { let mut tao_in: BTreeMap = BTreeMap::new(); let mut alpha_in: BTreeMap = BTreeMap::new(); let mut alpha_out: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + // Only calculate for subnets that we are emitting to. + for netuid_i in subnets_to_emit_to.iter() { // Get subnet price. let price_i: I96F32 = Self::get_alpha_price(*netuid_i); log::debug!("price_i: {:?}", price_i); @@ -103,7 +112,7 @@ impl Pallet { // --- 4. Injection. // Actually perform the injection of alpha_in, alpha_out and tao_in into the subnet pool. // This operation changes the pool liquidity each block. - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Inject Alpha in. let alpha_in_i: u64 = tou64!(*alpha_in.get(netuid_i).unwrap_or(&asfloat!(0))); SubnetAlphaInEmission::::insert(*netuid_i, alpha_in_i); @@ -135,7 +144,7 @@ impl Pallet { // Owner cuts are accumulated and then fed to the drain at the end of this func. let cut_percent: I96F32 = Self::get_float_subnet_owner_cut(); let mut owner_cuts: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Get alpha out. let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); @@ -154,7 +163,7 @@ impl Pallet { // --- 6. Seperate out root dividends in alpha and sell them into tao. // Then accumulate those dividends for later. 
- for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Get remaining alpha out. let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0.0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); @@ -199,12 +208,14 @@ impl Pallet { } // --- 7 Update moving prices after using them in the emission calculation. - for netuid_i in subnets.iter() { + // Only update price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Update moving prices after using them above. Self::update_moving_price(*netuid_i); } // --- 7. Drain pending emission through the subnet based on tempo. + // Run the epoch for *all* subnets, even if we don't emit anything. for &netuid in subnets.iter() { // Pass on subnets that have not reached their tempo. if Self::should_run_epoch(netuid, current_block) { @@ -251,27 +262,10 @@ impl Pallet { } } - pub fn drain_pending_emission( + pub fn calculate_dividends_and_incentives( netuid: u16, - pending_alpha: u64, - pending_tao: u64, - pending_swapped: u64, - owner_cut: u64, - ) { - log::debug!( - "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut - ); - - // Run the epoch. - let hotkey_emission: Vec<(T::AccountId, u64, u64)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); - log::debug!("hotkey_emission: {:?}", hotkey_emission); - + hotkey_emission: Vec<(T::AccountId, u64, u64)>, + ) -> (BTreeMap, BTreeMap) { // Accumulate emission of dividends and incentive per hotkey. let mut incentives: BTreeMap = BTreeMap::new(); let mut dividends: BTreeMap = BTreeMap::new(); @@ -283,7 +277,7 @@ impl Pallet { .or_insert(incentive); // Accumulate dividends to parents. 
let div_tuples: Vec<(T::AccountId, u64)> = - Self::get_dividends_distribution(&hotkey, netuid, dividend); + Self::get_parent_child_dividends_distribution(&hotkey, netuid, dividend); // Accumulate dividends per hotkey. for (parent, parent_div) in div_tuples { dividends @@ -295,64 +289,71 @@ impl Pallet { log::debug!("incentives: {:?}", incentives); log::debug!("dividends: {:?}", dividends); - Self::distribute_dividends_and_incentives( - netuid, - pending_tao, - owner_cut, - incentives, - dividends, - ); + (incentives, dividends) } - pub fn distribute_dividends_and_incentives( - netuid: u16, + pub fn calculate_dividend_distribution( + pending_alpha: u64, pending_tao: u64, - owner_cut: u64, - incentives: BTreeMap, + tao_weight: I96F32, + stake_map: BTreeMap, dividends: BTreeMap, + ) -> ( + BTreeMap, + BTreeMap, ) { + log::debug!("dividends: {:?}", dividends); + log::debug!("stake_map: {:?}", stake_map); + log::debug!("pending_alpha: {:?}", pending_alpha); + log::debug!("pending_tao: {:?}", pending_tao); + log::debug!("tao_weight: {:?}", tao_weight); + // Setup. let zero: I96F32 = asfloat!(0.0); // Accumulate root divs and alpha_divs. For each hotkey we compute their // local and root dividend proportion based on their alpha_stake/root_stake let mut total_root_divs: I96F32 = asfloat!(0); + let mut total_alpha_divs: I96F32 = asfloat!(0); let mut root_dividends: BTreeMap = BTreeMap::new(); let mut alpha_dividends: BTreeMap = BTreeMap::new(); for (hotkey, dividend) in dividends { - // Get hotkey ALPHA on subnet. - let alpha_stake = asfloat!(Self::get_stake_for_hotkey_on_subnet(&hotkey, netuid)); - // Get hotkey TAO on root. - let root_stake: I96F32 = asfloat!(Self::get_stake_for_hotkey_on_subnet( - &hotkey, - Self::get_root_netuid() - )); - // Convert TAO to alpha with weight. 
- let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); - // Get total from root and local - let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); - // Copmute root prop. - let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); - // Compute root dividends - let root_divs: I96F32 = dividend.saturating_mul(root_prop); - // Compute alpha dividends - let alpha_divs: I96F32 = dividend.saturating_sub(root_divs); - // Record the alpha dividends. - alpha_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(alpha_divs)) - .or_insert(alpha_divs); - // Record the root dividends. - root_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(root_divs)) - .or_insert(root_divs); - // Accumulate total root divs. - total_root_divs = total_root_divs.saturating_add(root_divs); + if let Some((alpha_stake_u64, root_stake_u64)) = stake_map.get(&hotkey) { + // Get hotkey ALPHA on subnet. + let alpha_stake: I96F32 = asfloat!(*alpha_stake_u64); + // Get hotkey TAO on root. + let root_stake: I96F32 = asfloat!(*root_stake_u64); + + // Convert TAO to alpha with weight. + let root_alpha: I96F32 = root_stake.saturating_mul(tao_weight); + // Get total from root and local + let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); + // Compute root prop. + let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); + // Compute root dividends + let root_divs: I96F32 = dividend.saturating_mul(root_prop); + // Compute alpha dividends + let alpha_divs: I96F32 = dividend.saturating_sub(root_divs); + // Record the alpha dividends. + alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(alpha_divs)) + .or_insert(alpha_divs); + // Accumulate total alpha divs. + total_alpha_divs = total_alpha_divs.saturating_add(alpha_divs); + // Record the root dividends. 
+ root_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(root_divs)) + .or_insert(root_divs); + // Accumulate total root divs. + total_root_divs = total_root_divs.saturating_add(root_divs); + } } log::debug!("alpha_dividends: {:?}", alpha_dividends); log::debug!("root_dividends: {:?}", root_dividends); log::debug!("total_root_divs: {:?}", total_root_divs); + log::debug!("total_alpha_divs: {:?}", total_alpha_divs); // Compute root divs as TAO. Here we take let mut tao_dividends: BTreeMap = BTreeMap::new(); @@ -371,6 +372,34 @@ impl Pallet { } log::debug!("tao_dividends: {:?}", tao_dividends); + // Compute proportional alpha divs using the pending alpha and total alpha divs from the epoch. + let mut prop_alpha_dividends: BTreeMap = BTreeMap::new(); + for (hotkey, alpha_divs) in alpha_dividends { + // Alpha proportion. + let alpha_share: I96F32 = alpha_divs.checked_div(total_alpha_divs).unwrap_or(zero); + log::debug!("hotkey: {:?}, alpha_share: {:?}", hotkey, alpha_share); + + // Compute the proportional pending_alpha to this hotkey. + let prop_alpha: I96F32 = asfloat!(pending_alpha).saturating_mul(alpha_share); + log::debug!("hotkey: {:?}, prop_alpha: {:?}", hotkey, prop_alpha); + // Record the proportional alpha dividends. + prop_alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = prop_alpha) + .or_insert(prop_alpha); + } + log::debug!("prop_alpha_dividends: {:?}", prop_alpha_dividends); + + (prop_alpha_dividends, tao_dividends) + } + + pub fn distribute_dividends_and_incentives( + netuid: u16, + owner_cut: u64, + incentives: BTreeMap, + alpha_dividends: BTreeMap, + tao_dividends: BTreeMap, + ) { // Distribute the owner cut. 
if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { @@ -467,6 +496,114 @@ impl Pallet { } } + pub fn get_stake_map( + netuid: u16, + hotkeys: Vec<&T::AccountId>, + ) -> BTreeMap { + let mut stake_map: BTreeMap = BTreeMap::new(); + for hotkey in hotkeys { + // Get hotkey ALPHA on subnet. + let alpha_stake: u64 = Self::get_stake_for_hotkey_on_subnet(hotkey, netuid); + // Get hotkey TAO on root. + let root_stake: u64 = + Self::get_stake_for_hotkey_on_subnet(hotkey, Self::get_root_netuid()); + stake_map.insert(hotkey.clone(), (alpha_stake, root_stake)); + } + stake_map + } + + pub fn calculate_dividend_and_incentive_distribution( + netuid: u16, + pending_tao: u64, + pending_validator_alpha: u64, + hotkey_emission: Vec<(T::AccountId, u64, u64)>, + tao_weight: I96F32, + ) -> ( + BTreeMap, + ( + BTreeMap, + BTreeMap, + ), + ) { + let (incentives, dividends) = + Self::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let stake_map: BTreeMap = + Self::get_stake_map(netuid, dividends.keys().collect::>()); + + let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + (incentives, (alpha_dividends, tao_dividends)) + } + + pub fn drain_pending_emission( + netuid: u16, + pending_alpha: u64, + pending_tao: u64, + pending_swapped: u64, + owner_cut: u64, + ) { + log::debug!( + "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut + ); + + let tao_weight = Self::get_tao_weight(); + + // Run the epoch. + let hotkey_emission: Vec<(T::AccountId, u64, u64)> = + Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + log::debug!("hotkey_emission: {:?}", hotkey_emission); + + // Compute the pending validator alpha. 
+ // This is the total alpha being injected, + // minus the the alpha for the miners, (50%) + // and minus the alpha swapped for TAO (pending_swapped). + // Important! If the incentives are 0, then Validators get 100% of the alpha. + let incentive_sum = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum::(); + log::debug!("incentive_sum: {:?}", incentive_sum); + + let pending_validator_alpha: u64 = if incentive_sum != 0 { + pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped) + } else { + // If the incentive is 0, then Validators get 100% of the alpha. + pending_alpha + }; + + let (incentives, (alpha_dividends, tao_dividends)) = + Self::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + Self::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + alpha_dividends, + tao_dividends, + ); + } + /// Returns the self contribution of a hotkey on a subnet. /// This is the portion of the hotkey's stake that is provided by itself, and not delegated to other hotkeys. pub fn get_self_contribution(hotkey: &T::AccountId, netuid: u16) -> u64 { @@ -515,7 +652,7 @@ impl Pallet { /// # Returns /// * dividend_tuples: `Vec<(T::AccountId, u64)>` - Vector of (hotkey, divs) for each parent including self. /// - pub fn get_dividends_distribution( + pub fn get_parent_child_dividends_distribution( hotkey: &T::AccountId, netuid: u16, dividends: u64, diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 436dba84a0..b4f23ced83 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -549,6 +549,24 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { }); } +// Apply column mask to matrix, mask=true will mask out, i.e. set to 0. +// Assumes each column has the same length. 
+#[allow(dead_code)] +pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { + let Some(first_row) = matrix.first() else { + return; + }; + assert_eq!(mask.len(), first_row.len()); + let zero: I32F32 = I32F32::saturating_from_num(0); + matrix.iter_mut().for_each(|row_elem| { + row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { + if *mask_col { + *elem = zero; + } + }); + }); +} + // Mask out the diagonal of the input matrix in-place. #[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut [Vec]) { @@ -569,6 +587,26 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { }); } +// Remove cells from sparse matrix where the mask function of a scalar and a vector is true. +#[allow(dead_code, clippy::indexing_slicing)] +pub fn scalar_vec_mask_sparse_matrix( + sparse_matrix: &[Vec<(u16, I32F32)>], + scalar: u64, + vector: &[u64], + mask_fn: &dyn Fn(u64, u64) -> bool, +) -> Vec> { + let n: usize = sparse_matrix.len(); + let mut result: Vec> = vec![vec![]; n]; + for (i, sparse_row) in sparse_matrix.iter().enumerate() { + for (j, value) in sparse_row { + if !mask_fn(scalar, vector[*j as usize]) { + result[i].push((*j, *value)); + } + } + } + result +} + // Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index. #[allow(dead_code)] pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: u16) { diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 27dd17fa0b..62027f9636 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -22,6 +22,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block:\n{:?}\n", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo: {:?}", tempo); + // Get activity cutoff. 
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff); @@ -44,7 +48,7 @@ impl Pallet { let block_at_registration: Vec = Self::get_block_at_registration(netuid); log::trace!("Block at registration:\n{:?}\n", &block_at_registration); - // Outdated matrix, updated_ij=True if i has last updated (weights) after j has last registered. + // Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered. let outdated: Vec> = last_update .iter() .map(|updated| { @@ -56,6 +60,16 @@ impl Pallet { .collect(); log::trace!("Outdated:\n{:?}\n", &outdated); + // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + let recently_registered: Vec = block_at_registration + .iter() + .map(|registered| last_tempo <= *registered) + .collect(); + log::trace!("Recently registered:\n{:?}\n", &recently_registered); + // =========== // == Stake == // =========== @@ -185,7 +199,8 @@ impl Pallet { // Access network bonds. let mut bonds: Vec> = Self::get_bonds(netuid); - inplace_mask_matrix(&outdated, &mut bonds); // mask outdated bonds + // Remove bonds referring to neurons that have registered since last tempo. + inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 log::trace!("B:\n{:?}\n", &bonds); @@ -386,6 +401,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block: {:?}", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo:\n{:?}\n", tempo); + // Get activity cutoff. 
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff: {:?}", activity_cutoff); @@ -548,12 +567,15 @@ impl Pallet { let mut bonds: Vec> = Self::get_bonds_sparse(netuid); log::trace!("B: {:?}", &bonds); - // Remove bonds referring to deregistered neurons. - bonds = vec_mask_sparse_matrix( + // Remove bonds referring to neurons that have registered since last tempo. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( &bonds, - &last_update, + last_tempo, &block_at_registration, - &|updated, registered| updated <= registered, + &|last_tempo, registered| last_tempo <= registered, ); log::trace!("B (outdatedmask): {:?}", &bonds); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 03438aa637..1ec9cadb0a 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1125,7 +1125,11 @@ pub mod pallet { /// ============================ /// ==== Subnet Parameters ===== /// ============================ - #[pallet::storage] // --- MAP ( netuid ) --> subnet mechanism + /// --- MAP ( netuid ) --> block number of first emission + #[pallet::storage] + pub type FirstEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; + /// --- MAP ( netuid ) --> subnet mechanism + #[pallet::storage] pub type SubnetMechanism = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultZeroU16>; #[pallet::storage] diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index af448c8771..cf4d97b65b 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -210,5 +210,8 @@ mod config { /// Initial EMA price halving period #[pallet::constant] type InitialEmaPriceHalvingPeriod: Get; + /// Block number after a new subnet accept the start call extrinsic. 
+ #[pallet::constant] + type DurationOfStartCall: Get; } } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index bcd2bb33f5..9158073e17 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1909,5 +1909,74 @@ mod dispatches { Ok(()) } + + /// Initiates a call on a subnet. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be signed by the subnet owner. + /// * `netuid` - The unique identifier of the subnet on which the call is being initiated. + /// + /// # Events + /// Emits a `FirstEmissionBlockNumberSet` event on success. + #[pallet::call_index(92)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(6, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + Self::do_start_call(origin, netuid)?; + Ok(()) + } + + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to recycle + /// * `netuid` - The subnet ID + /// + /// # Events + /// Emits a `TokensRecycled` event on success. 
+ #[pallet::call_index(101)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 2)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn recycle_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_recycle_alpha(origin, hotkey, amount, netuid) + } + + /// Burns alpha from a cold/hot key pair without reducing `AlphaOut` + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to burn + /// * `netuid` - The subnet ID + /// + /// # Events + /// Emits a `TokensBurned` event on success. + #[pallet::call_index(102)] + #[pallet::weight(( + Weight::from_parts(2_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn burn_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_burn_alpha(origin, hotkey, amount, netuid) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 1f189cd2f6..3404b36d8d 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -195,5 +195,13 @@ mod errors { ActivityCutoffTooLow, /// Call is disabled CallDisabled, + /// FirstEmissionBlockNumber is already set. + FirstEmissionBlockNumberAlreadySet, + /// need wait for more blocks to accept the start call extrinsic. 
+ NeedWaitingMoreBlocksToStarCall, + /// Not enough AlphaOut on the subnet to recycle + NotEnoughAlphaOutToRecycle, + /// Cannot burn or recycle TAO from root subnet + CannotBurnOrRecycleOnRootSubnet, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 834aa901fa..e756145f3a 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -14,9 +14,9 @@ mod events { /// a network is removed. NetworkRemoved(u16), /// stake has been transferred from the a coldkey account onto the hotkey staking account. - StakeAdded(T::AccountId, T::AccountId, u64, u64, u16), + StakeAdded(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been removed from the hotkey staking account onto the coldkey account. - StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16), + StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been moved from origin (hotkey, subnet ID) to destination (hotkey, subnet ID) of this amount (in TAO). StakeMoved(T::AccountId, T::AccountId, u16, T::AccountId, u16, u64), /// a caller successfully sets their weights on a subnetwork. @@ -275,5 +275,23 @@ mod events { /// Parameters: /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), + /// FirstEmissionBlockNumber is set via start call extrinsic + /// + /// Parameters: + /// netuid + /// block number + FirstEmissionBlockNumberSet(u16, u64), + + /// Alpha has been recycled, reducing AlphaOut on a subnet. + /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id) + AlphaRecycled(T::AccountId, T::AccountId, u64, u16), + + /// Alpha have been burned without reducing AlphaOut. 
+ /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id) + AlphaBurned(T::AccountId, T::AccountId, u64, u16), + } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index df9dffabca..834e6c86bb 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -81,7 +81,11 @@ mod hooks { // Remove Stake map entries .saturating_add(migrations::migrate_remove_stake_map::migrate_remove_stake_map::()) // Remove unused maps entries - .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()); + .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()) + // Set first emission block number for all subnets that existed before the start call feature was applied + .saturating_add(migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::()) + // Remove all zero value entries in TotalHotkeyAlpha + .saturating_add(migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs new file mode 100644 index 0000000000..3b45615bf4 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs @@ -0,0 +1,60 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { + let migration_name = b"migrate_remove_zero_total_hotkey_alpha".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // ------------------------------ + // Step 0: Check if already run + // ------------------------------ + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Remove any zero entries in TotalHotkeyAlpha + // ------------------------------ + + let mut removed_entries_count = 0u64; + + // For each (hotkey, netuid, alpha) entry, remove if alpha == 0 + for (hotkey, netuid, alpha) in TotalHotkeyAlpha::::iter() { + if alpha == 0 { + TotalHotkeyAlpha::::remove(&hotkey, netuid); + removed_entries_count = removed_entries_count.saturating_add(1); + } + } + + weight = weight.saturating_add(T::DbWeight::get().reads(removed_entries_count)); + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count)); + + log::info!( + "Removed {} zero entries from TotalHotkeyAlpha.", + removed_entries_count + ); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs new file mode 100644 index 0000000000..04ad306218 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs @@ -0,0 +1,53 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_set_first_emission_block_number() -> Weight { + let migration_name = b"migrate_set_first_emission_block_number".to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{:?}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Set the first emission block for all subnets except root + // ------------------------------ + let netuids = Pallet::::get_all_subnet_netuids(); + let current_block_number = Pallet::::get_current_block_as_u64(); + for netuid in netuids.iter() { + if *netuid != 0 { + FirstEmissionBlockNumber::::insert(netuid, current_block_number); + } + } + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().reads(2)); + + if netuids.is_empty() { + weight = weight.saturating_add(T::DbWeight::get().writes(1_u64)); + } else { + weight = weight.saturating_add(T::DbWeight::get().writes(netuids.len() as u64)); + } + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 6af6ad2a56..b342d54979 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -11,6 +11,8 @@ pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_remove_stake_map; pub mod migrate_remove_unused_maps_and_values; +pub mod migrate_remove_zero_total_hotkey_alpha; +pub mod migrate_set_first_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; pub mod migrate_stake_threshold; diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs index ecf8fb8815..570658631a 100644 --- a/pallets/subtensor/src/staking/mod.rs +++ b/pallets/subtensor/src/staking/mod.rs @@ -5,6 +5,7 @@ pub mod decrease_take; pub mod helpers; pub mod increase_take; pub mod 
move_stake; +pub mod recycle_alpha; pub mod remove_stake; pub mod set_children; pub mod stake_utils; diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs new file mode 100644 index 0000000000..b5e6762e6a --- /dev/null +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -0,0 +1,136 @@ +use super::*; +use crate::{Error, system::ensure_signed}; + +impl Pallet { + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of alpha to recycle + /// * `netuid` - The subnet ID from which to reduce AlphaOut + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub(crate) fn do_recycle_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey: T::AccountId = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + // Ensure that the hotkey account exists this is only possible through registration. + ensure!( + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists + ); + + // Ensure that the hotkey has enough stake to withdraw. + ensure!( + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), + Error::::NotEnoughStakeToWithdraw + ); + + ensure!( + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity + ); + + // Deduct from the coldkey's stake. + let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + + // Recycle means we should decrease the alpha issuance tracker. 
+ SubnetAlphaOut::::mutate(netuid, |total| { + *total = total.saturating_sub(actual_alpha_decrease); + }); + + Self::deposit_event(Event::AlphaRecycled( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); + + Ok(()) + } + + /// Burns alpha from a cold/hot key pair without reducing AlphaOut + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The "up to" amount of alpha to burn + /// * `netuid` - The subnet ID + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub(crate) fn do_burn_alpha( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + // Ensure that the hotkey account exists this is only possible through registration. + ensure!( + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists + ); + + // Ensure that the hotkey has enough stake to withdraw. + ensure!( + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), + Error::::NotEnoughStakeToWithdraw + ); + + ensure!( + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity + ); + + // Deduct from the coldkey's stake. + let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + + // This is a burn, so we don't need to update AlphaOut. 
+ + // Deposit event + Self::deposit_event(Event::AlphaBurned( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); + + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 894a5a9132..7757851649 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -794,14 +794,16 @@ impl Pallet { tao_unstaked, actual_alpha_decrease, netuid, + actual_fee, )); log::debug!( - "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_unstaked, actual_alpha_decrease, - netuid + netuid, + actual_fee ); // Step 6: Return the amount of TAO unstaked. @@ -857,14 +859,16 @@ impl Pallet { tao_staked, actual_alpha, netuid, + actual_fee, )); log::debug!( - "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_staked, actual_alpha, - netuid + netuid, + actual_fee ); // Step 7: Return the amount of alpha staked @@ -1097,7 +1101,7 @@ impl Pallet { DefaultStakingFee::::get() } else { // Otherwise, calculate the fee based on the alpha estimate - let fee = alpha_estimate + let mut fee = alpha_estimate .saturating_mul( I96F32::saturating_from_num(AlphaDividendsPerSubnet::::get( origin_netuid, @@ -1110,6 +1114,16 @@ impl Pallet { .saturating_mul(Self::get_alpha_price(origin_netuid)) // fee needs to be in TAO .saturating_to_num::(); + // 0.005% per epoch matches to 44% annual in compound interest. Do not allow the fee + // to be lower than that. 
(1.00005^(365*20) ~= 1.44) + let apr_20_percent = I96F32::saturating_from_num(0.00005); + fee = fee.max( + alpha_estimate + .saturating_mul(apr_20_percent) + .saturating_to_num::(), + ); + + // We should at least get DefaultStakingFee anyway fee.max(DefaultStakingFee::::get()) } } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index bf1806da14..e4721c03f5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -239,7 +239,7 @@ impl Pallet { netuid_to_register, mechid ); - Self::deposit_event(Event::NetworkAdded(netuid_to_register, 0)); + Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); // --- 17. Return success. Ok(()) @@ -272,7 +272,6 @@ impl Pallet { Self::set_target_registrations_per_interval(netuid, 1); Self::set_adjustment_alpha(netuid, 17_893_341_751_498_265_066); // 18_446_744_073_709_551_615 * 0.97 = 17_893_341_751_498_265_066 Self::set_immunity_period(netuid, 5000); - Self::set_min_burn(netuid, 1); Self::set_min_difficulty(netuid, u64::MAX); Self::set_max_difficulty(netuid, u64::MAX); @@ -320,4 +319,58 @@ impl Pallet { ); } } + + /// Execute the start call for a subnet. + /// + /// This function is used to trigger the start call process for a subnet identified by `netuid`. + /// It ensures that the subnet exists, the caller is the subnet owner, + /// and the last emission block number has not been set yet. + /// It then sets the last emission block number to the current block number. + /// + /// # Parameters + /// + /// * `origin`: The origin of the call, which is used to ensure the caller is the subnet owner. + /// * `netuid`: The unique identifier of the subnet for which the start call process is being initiated. + /// + /// # Raises + /// + /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. + /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. 
+ /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. + /// + /// # Returns + /// + /// * `DispatchResult`: A result indicating the success or failure of the operation. + pub fn do_start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + Self::ensure_subnet_owner(origin, netuid)?; + ensure!( + FirstEmissionBlockNumber::::get(netuid).is_none(), + Error::::FirstEmissionBlockNumberAlreadySet + ); + + let registration_block_number = NetworkRegisteredAt::::get(netuid); + let current_block_number = Self::get_current_block_as_u64(); + + ensure!( + current_block_number + >= registration_block_number.saturating_add(T::DurationOfStartCall::get()), + Error::::NeedWaitingMoreBlocksToStarCall + ); + let next_block_number = current_block_number.saturating_add(1); + + FirstEmissionBlockNumber::::insert(netuid, next_block_number); + Self::deposit_event(Event::FirstEmissionBlockNumberSet( + netuid, + next_block_number, + )); + Ok(()) + } + + pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { + FirstEmissionBlockNumber::::get(netuid).is_some() + } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index aaf3b5fe6b..c97252677c 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -23,6 +23,7 @@ impl Pallet { Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. 
diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 21ddd453f5..faf48a8366 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -3382,17 +3382,17 @@ fn test_dividend_distribution_with_children() { "C should have pending emission of 1/9 of total emission" ); - let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_c = SubtensorModule::get_dividends_distribution( + let dividends_c = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_c, netuid, hardcoded_emission.saturating_to_num::(), @@ -3883,12 +3883,12 @@ fn test_dividend_distribution_with_children_same_coldkey_owner() { ); // Get the distribution of dividends including the Parent/Child relationship. 
- let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 9f59fe338e..0a95f6dd60 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -569,7 +569,7 @@ fn test_drain_base_with_subnet_with_single_staker_registered_root_weight() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); let root_after = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, root); - close(stake_before + pending_alpha / 2, stake_after, 10); // Registered gets all alpha emission. + close(stake_before + pending_alpha, stake_after, 10); // Registered gets all alpha emission. close(stake_before + pending_tao, root_after, 10); // Registered gets all tao emission }); } @@ -660,8 +660,8 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - close(stake_before + pending_alpha / 4, stake_after1, 10); // Registered gets 1/2 emission - close(stake_before + pending_alpha / 4, stake_after2, 10); // Registered gets 1/2 emission. + close(stake_before + pending_alpha / 2, stake_after1, 10); // Registered gets 1/2 emission + close(stake_before + pending_alpha / 2, stake_after2, 10); // Registered gets 1/2 emission. 
close(stake_before + pending_tao / 2, root_after1, 10); // Registered gets 1/2 tao emission close(stake_before + pending_tao / 2, root_after2, 10); // Registered gets 1/2 tao emission }); @@ -719,21 +719,17 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); let expected_stake = I96F32::from_num(stake_before) - + (I96F32::from_num(pending_alpha) - * I96F32::from_num(3.0 / 5.0) - * I96F32::from_num(1.0 / 3.0)); - close(expected_stake.to_num::(), stake_after1, 10); // Registered gets 60% of emission + + (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0)); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); // Registered gets 50% of alpha emission let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(2.0 / 5.0) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake2.to_num::(), stake_after2, 10); // Registered gets 40% emission + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // Registered gets 50% emission let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); // Registered gets 2/3 tao emission + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // Registered gets 2/3 tao emission let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); // Registered gets 1/3 tao emission + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); // Registered gets 1/3 tao emission }); } @@ -789,26 +785,20 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am 
SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - // hotkey 1 has (1 + (2 * 0.5))/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.5714285714 of the hotkey emission. let expected_stake = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.5714285714) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake.to_num::(), stake_after1, 10); - // hotkey 2 has (1 + 1*0.5)/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.4285714286 of the hotkey emission. + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.4285714286) - * I96F32::from_num(2.0 / 3.0); - close(expected_stake2.to_num::(), stake_after2, 10); + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // hotkey 1 has 2 / 3 root tao let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // hotkey 1 has 1 / 3 root tao let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); }); } @@ -1058,11 +1048,11 @@ fn test_get_root_children_drain() { // Alice and Bob both made half of the dividends. 
assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&alice, alpha), - alice_alpha_stake + pending_alpha / 4 + alice_alpha_stake + pending_alpha / 2 ); assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&bob, alpha), - bob_alpha_stake + pending_alpha / 4 + bob_alpha_stake + pending_alpha / 2 ); // Lets drain @@ -1092,9 +1082,10 @@ fn test_get_root_children_drain() { assert_eq!(AlphaDividendsPerSubnet::::get(alpha, alice), 0); assert_eq!(TaoDividendsPerSubnet::::get(alpha, alice), 0); // Bob makes it all. - assert_eq!( + assert_abs_diff_eq!( AlphaDividendsPerSubnet::::get(alpha, bob), - (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 - 0.495412844)).to_num::() + pending_alpha, + epsilon = 1 ); assert_eq!(TaoDividendsPerSubnet::::get(alpha, bob), pending_root); }); @@ -1172,12 +1163,12 @@ fn test_get_root_children_drain_half_proportion() { // Alice and Bob make the same amount. close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 4, + pending_alpha / 2, 10, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 4, + pending_alpha / 2, 10, ); }); @@ -1243,7 +1234,7 @@ fn test_get_root_children_drain_with_take() { // Set Bob as 100% child of Alice on root. ChildkeyTake::::insert(bob, alpha, u16::MAX); mock_set_children_no_epochs(alpha, &alice, &[(u64::MAX, bob)]); - // Set Bob childkey take to zero. + // Set Bob validator take to zero. Delegates::::insert(alice, 0); Delegates::::insert(bob, 0); @@ -1251,11 +1242,11 @@ fn test_get_root_children_drain_with_take() { let pending_alpha: u64 = 1_000_000_000; SubtensorModule::drain_pending_emission(alpha, pending_alpha, 0, 0, 0); - // Alice and Bob make the same amount. + // Bob makes it all. close(AlphaDividendsPerSubnet::::get(alpha, alice), 0, 10); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 2, + pending_alpha, 10, ); }); @@ -1332,12 +1323,12 @@ fn test_get_root_children_drain_with_half_take() { // Alice and Bob make the same amount. 
close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 8, + pending_alpha / 4, 10000, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - 3 * (pending_alpha / 8), + 3 * (pending_alpha / 4), 10000, ); }); @@ -1454,9 +1445,9 @@ fn test_incentive_to_subnet_owner_is_burned() { let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); let pending_tao: u64 = 1_000_000_000; + let pending_alpha: u64 = 0; // None to valis let owner_cut: u64 = 0; let mut incentives: BTreeMap = BTreeMap::new(); - let mut dividends: BTreeMap = BTreeMap::new(); // Give incentive to other_hk incentives.insert(other_hk, 10_000_000); @@ -1474,10 +1465,10 @@ fn test_incentive_to_subnet_owner_is_burned() { // Distribute dividends and incentives SubtensorModule::distribute_dividends_and_incentives( netuid, - pending_tao, owner_cut, incentives, - dividends, + BTreeMap::new(), + BTreeMap::new(), ); // Verify stake after @@ -1488,3 +1479,655 @@ fn test_incentive_to_subnet_owner_is_burned() { assert!(other_stake_after > 0); }); } + +#[test] +fn test_calculate_dividend_distribution_totals() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_only_tao() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 0; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_no_tao_weight() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; // If tao weight is 0, then only alpha dividends should be input. + let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_only_alpha() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_and_incentive_distribution() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha / 2; // Pay half to validators. + let pending_tao: u64 = 0; + let pending_swapped = 0; // Only alpha output. + let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, pending_alpha / 2, pending_alpha / 2)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = alpha_dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_abs_diff_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha, + epsilon = 2 + ); + }); +} + +#[test] +fn test_calculate_dividend_and_incentive_distribution_all_to_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha; // Pay all to validators. 
+ let pending_tao: u64 = 0; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, 0, pending_alpha)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = alpha_dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha + ); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 683_051_923; + let total_emission: u64 = divdends.saturating_add(incentive); + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + total_emission + ); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a 
single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 0; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(dividends_total, divdends); + assert_eq!(incentives_total, 0); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_miners() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 0; + let incentive: u64 = 123_456_789; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(incentives_total, incentive); + assert_eq!(dividends_total, divdends); + }); +} + +#[test] +fn test_drain_pending_emission_no_miners_all_drained() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let init_stake: u64 = 1; + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + // Set the emission to be 1 million. + let emission: u64 = 1_000_000; + // Run drain pending without any miners. + SubtensorModule::drain_pending_emission(netuid, emission, 0, 0, 0); + + // Get the new stake of the hotkey. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect this neuron to get *all* the emission. + // Slight epsilon due to rounding (hotkey_take). 
+ assert_abs_diff_eq!(new_stake, emission.saturating_add(init_stake), epsilon = 1); + }); +} + +#[test] +fn test_drain_pending_emission_zero_emission() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 50); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + run_to_block_no_epoch(netuid, 50); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Set the emission to be ZERO. + SubtensorModule::drain_pending_emission(netuid, 0, 0, 0, 0); + + // Get the new stake of the hotkey. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set by epoch. 
+ assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. 
+ next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set. + assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started_start_after() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. 
+ SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + let block_number = DurationOfStartCall::get(); + run_to_block_no_epoch(netuid, block_number); + + let current_block = System::block_number(); + + // Run start call. + assert_ok!(SubtensorModule::start_call( + RuntimeOrigin::signed(sn_owner_ck), + netuid + )); + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(current_block + 1) + ); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. 
+ let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert!(new_stake > init_stake); + log::info!("new_stake: {}", new_stake); + }); +} diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 38b104ac2b..aaaf93e086 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -8,6 +8,7 @@ use super::mock::*; use crate::epoch::math::safe_exp; use crate::*; +use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_ok}; // use frame_system::Config; @@ -989,7 +990,7 @@ fn test_bonds() { let sparse: bool = true; let n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1018,7 +1019,7 @@ fn test_bonds() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!( SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch( netuid, 1_000_000_000 ); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val->srv1: 0.1, val->srv2: 0.2, val->srv3: 0.3, val->srv4: 0.4] for uid in 0..(n/2) as u64 { @@ -1068,7 +1069,8 @@ fn test_bonds() { // === Set self-weight only on val1 let uid = 0; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* n: 8 @@ -1115,7 +1117,8 @@ fn test_bonds() { // === Set 
self-weight only on val2 let uid = 1; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 3 @@ -1151,7 +1154,8 @@ fn test_bonds() { // === Set self-weight only on val3 let uid = 2; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 4 @@ -1186,7 +1190,8 @@ fn test_bonds() { // === Set val3->srv4: 1 assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(2)), netuid, vec![7], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 5 @@ -1219,7 +1224,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 6 @@ -1240,7 +1246,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 7 @@ -1261,7 +1268,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 
1_000_000_000 ); } /* current_block: 8 @@ -1286,7 +1294,7 @@ fn test_bonds_with_liquid_alpha() { let sparse: bool = true; let n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1326,7 +1334,7 @@ fn test_bonds_with_liquid_alpha() { // Initilize with first epoch SubtensorModule::epoch(netuid, 1_000_000_000); - next_block(); + next_block_no_epoch(netuid); // Set weights for uid in 0..(n / 2) { @@ -1417,7 +1425,7 @@ fn test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1439,7 +1447,7 @@ fn test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1543,7 +1551,7 @@ fn test_active_stake() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1586,7 +1594,7 @@ fn test_active_stake() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val1->srv1: 0.5, val1->srv2: 0.5, val2->srv1: 0.5, val2->srv2: 0.5] for uid in 0..(n / 2) as u64 { @@ -1627,7 +1635,7 @@ fn 
test_active_stake() { } } let activity_cutoff: u64 = SubtensorModule::get_activity_cutoff(netuid) as u64; - run_to_block(activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated // === Update uid 0 weights assert_ok!(SubtensorModule::set_weights( @@ -1697,7 +1705,7 @@ fn test_active_stake() { vec![u16::MAX / (n / 2); (n / 2) as usize], 0 )); - run_to_block(activity_cutoff + 3); // run to block where validator (uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 3); // run to block where validator (uid 0, 1) weights become outdated if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1750,7 +1758,7 @@ fn test_outdated_weights() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 0; let mut block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1796,7 +1804,7 @@ fn test_outdated_weights() { assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); - block_number = next_block(); // run to next block to ensure weights are set on nodes after their registration block + block_number = next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3, srv1->srv1: 1, srv2->srv2: 1] @@ -1877,7 +1885,7 @@ fn test_outdated_weights() { SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) .expect("Not 
registered") ); - next_block(); // run to next block to outdate weights and bonds set on deregistered uid + next_block_no_epoch(netuid); // run to next block to outdate weights and bonds set on deregistered uid // === Update weights from only uid=0 assert_ok!(SubtensorModule::set_weights( @@ -2124,6 +2132,186 @@ fn test_zero_weights() { }); } +// Test that recently/deregistered miner bonds are cleared before EMA. +#[test] +fn test_deregistered_miner_bonds() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 4; + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + + let stake: u64 = 1; + add_network(netuid, high_tempo, 0); + SubtensorModule::set_max_allowed_uids(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_min_allowed_weights(netuid, 0); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::set_bonds_penalty(netuid, u16::MAX); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Register [validator1, validator2, server1, server2] + let block_number = System::block_number(); + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), + ); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stake, + ); + } + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + 
assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, n); + assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); + SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + next_block(); // run to next block to ensure weights are set on nodes after their registration block + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3] + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run 2 blocks + next_block(); + next_block(); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2 = bonds[0][2]; + let bond_0_3 = bonds[0][3]; + + // Non-zero bonds + assert!(bond_0_2 > 0); + assert!(bond_0_3 > 0); + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run one more block + next_block(); + + // === Dereg server2 at uid3 (least emission) + register new key over uid3 + let new_key: u64 = n as u64; // register a new key while at max capacity, which means the least incentive uid will be deregistered + let block_number = System::block_number(); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 
0, + &U256::from(new_key), + ); + assert_eq!(SubtensorModule::get_max_registrations_per_block(netuid), n); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(new_key)), + netuid, + block_number, + nonce, + work, + U256::from(new_key), + U256::from(new_key) + )); + let deregistered_uid: u16 = n - 1; // since uid=n-1 only recieved 1/3 of weight, it will get pruned first + assert_eq!( + U256::from(new_key), + SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) + .expect("Not registered") + ); + + // Set weights again so they're active. + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Run 1 block + next_block(); + // Assert block at registration happened after the last tempo + let block_at_registration = SubtensorModule::get_neuron_block_at_registration(netuid, 3); + let block_number = System::block_number(); + assert!( + block_at_registration >= block_number - 2, + "block at registration: {}, block number: {}", + block_at_registration, + block_number + ); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch again. + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2_new = bonds[0][2]; + let bond_0_3_new = bonds[0][3]; + + // We expect the old bonds for server2, (uid3), to be reset. + // For server1, (uid2), the bond should be higher than before. 
+ assert!( + bond_0_2_new >= bond_0_2, + "bond_0_2_new: {}, bond_0_2: {}", + bond_0_2_new, + bond_0_2 + ); + assert!( + bond_0_3_new <= bond_0_3, + "bond_0_3_new: {}, bond_0_3: {}", + bond_0_3_new, + bond_0_3 + ); + }); +} + // Test that epoch assigns validator permits to highest stake uids, varies uid interleaving and stake values. #[test] fn test_validator_permits() { @@ -2828,6 +3016,41 @@ fn test_can_set_self_weight_as_subnet_owner() { }); } +#[test] +fn test_epoch_outputs_single_staker_registered_no_weights() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // Don't run automatically. + add_network(netuid, high_tempo, 0); + + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha: u64 = 1_000_000_000; + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, pending_alpha); + + let sum_incentives: u64 = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum(); + let sum_dividends: u64 = hotkey_emission + .iter() + .map(|(_, _, dividend)| dividend) + .sum(); + + assert_abs_diff_eq!( + sum_incentives.saturating_add(sum_dividends), + pending_alpha, + epsilon = 1_000 + ); + }); +} + // Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, // of which the first 64 are validators, the graph is split into a major and minor set, each setting // specific weight on itself and the complement on the other. 
diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index 036e2015ab..c70da2c9d2 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1220,6 +1220,45 @@ fn test_math_vec_mask_sparse_matrix() { ); } +#[test] +fn test_math_scalar_vec_mask_sparse_matrix() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 1; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a == b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a <= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a >= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + #[test] fn test_math_row_hadamard() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index c0cbe1b81a..0628127413 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ 
b/pallets/subtensor/src/tests/migration.rs @@ -11,6 +11,7 @@ use frame_support::{ traits::{StorageInstance, StoredMap}, weights::Weight, }; + use frame_system::Config; use sp_core::{H256, U256, crypto::Ss58Codec}; use sp_io::hashing::twox_128; @@ -416,3 +417,66 @@ fn test_migrate_subnet_volume() { assert_eq!(new_value, Some(old_value as u128)); }); } + +#[test] +fn test_migrate_set_first_emission_block_number() { + new_test_ext(1).execute_with(|| { + let netuids: [u16; 3] = [1, 2, 3]; + let block_number = 100; + for netuid in netuids.iter() { + add_network(*netuid, 1, 0); + } + run_to_block(block_number); + let weight = crate::migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::(); + + let expected_weight: Weight = ::DbWeight::get().reads(3) + ::DbWeight::get().writes(netuids.len() as u64); + assert_eq!(weight, expected_weight); + + assert_eq!(FirstEmissionBlockNumber::::get(0), None); + for netuid in netuids.iter() { + assert_eq!(FirstEmissionBlockNumber::::get(netuid), Some(block_number)); + } +}); +} + +#[test] +fn test_migrate_remove_zero_total_hotkey_alpha() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_remove_zero_total_hotkey_alpha"; + let netuid = 1u16; + + let hotkey_zero = U256::from(100u64); + let hotkey_nonzero = U256::from(101u64); + + // Insert one zero-alpha entry and one non-zero entry + TotalHotkeyAlpha::::insert(hotkey_zero, netuid, 0u64); + TotalHotkeyAlpha::::insert(hotkey_nonzero, netuid, 123u64); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_zero, netuid), 0u64); + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet." 
+ ); + + let weight = crate::migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::(); + + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should be marked as run." + ); + + assert!( + !TotalHotkeyAlpha::::contains_key(hotkey_zero, netuid), + "Zero-alpha entry should have been removed." + ); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !weight.is_zero(), + "Migration weight should be non-zero." + ); + }); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 0d979a6126..9729d55d1a 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -138,7 +138,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty:u16 = 0; + pub const InitialBondsPenalty:u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production @@ -152,7 +152,7 @@ parameter_types! { pub const InitialTxDelegateTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialTxChildKeyTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialBurn: u64 = 0; - pub const InitialMinBurn: u64 = 0; + pub const InitialMinBurn: u64 = 500_000; pub const InitialMaxBurn: u64 = 1_000_000_000; pub const InitialValidatorPruneLen: u64 = 0; pub const InitialScalingLawPower: u16 = 50; @@ -185,6 +185,7 @@ parameter_types! { pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days pub const InitialTaoWeight: u64 = 0; // 100% global weight. 
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // Default as 7 days } // Configure collective pallet for council @@ -408,6 +409,7 @@ impl crate::Config for Test { type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } pub struct OriginPrivilegeCmp; @@ -592,6 +594,30 @@ pub(crate) fn run_to_block(n: u64) { } } +#[allow(dead_code)] +pub(crate) fn next_block_no_epoch(netuid: u16) -> u64 { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + let new_block = next_block(); + SubtensorModule::set_tempo(netuid, old_tempo); + + new_block +} + +#[allow(dead_code)] +pub(crate) fn run_to_block_no_epoch(netuid: u16, n: u64) { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + run_to_block(n); + SubtensorModule::set_tempo(netuid, old_tempo); +} + #[allow(dead_code)] pub(crate) fn step_epochs(count: u16, netuid: u16) { for _ in 0..count { @@ -662,6 +688,14 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::init_new_network(netuid, tempo); SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); + FirstEmissionBlockNumber::::insert(netuid, 1); +} + +#[allow(dead_code)] +pub fn add_network_without_emission_block(netuid: u16, tempo: u16, _modality: u16) { + SubtensorModule::init_new_network(netuid, tempo); + SubtensorModule::set_network_registration_allowed(netuid, true); + 
SubtensorModule::set_network_pow_registration_allowed(netuid, true); } #[allow(dead_code)] @@ -670,6 +704,22 @@ pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { let lock_cost = SubtensorModule::get_network_lock_cost(); SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost); + assert_ok!(SubtensorModule::register_network( + RawOrigin::Signed(*coldkey).into(), + *hotkey + )); + NetworkRegistrationAllowed::::insert(netuid, true); + NetworkPowRegistrationAllowed::::insert(netuid, true); + FirstEmissionBlockNumber::::insert(netuid, 0); + netuid +} + +#[allow(dead_code)] +pub fn add_dynamic_network_without_emission_block(hotkey: &U256, coldkey: &U256) -> u16 { + let netuid = SubtensorModule::get_next_netuid(); + let lock_cost = SubtensorModule::get_network_lock_cost(); + SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost); + assert_ok!(SubtensorModule::register_network( RawOrigin::Signed(*coldkey).into(), *hotkey diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 6865c9fa49..efd45ddef1 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -11,11 +11,13 @@ mod mock; mod move_stake; mod networks; mod neuron_info; +mod recycle_alpha; mod registration; mod senate; mod serving; mod staking; mod staking2; +mod subnet; mod swap_coldkey; mod swap_hotkey; mod uids; diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs new file mode 100644 index 0000000000..b142e5d3c9 --- /dev/null +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -0,0 +1,559 @@ +use approx::assert_abs_diff_eq; +use frame_support::{assert_noop, assert_ok, traits::Currency}; +use sp_core::U256; + +use super::mock::*; +use crate::*; + +#[test] +fn test_recycle_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = 
U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < initial_alpha + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to 
coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + 
increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + // Verify the ownership + assert_ne!( + SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey), + other_coldkey + ); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle from nominator coldkey + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_burn_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // 
add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. 
+ increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from nominator coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. 
+ increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_errors() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_hotkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + 
Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + wrong_hotkey, + 100_000, + netuid + ), + Error::::HotKeyAccountNotExists + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + + // make it pass the stake check + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); + + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), + Error::::InsufficientLiquidity + ); + }); +} + +#[test] +fn test_burn_errors() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_hotkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 
SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + wrong_hotkey, + 100_000, + netuid + ), + Error::::HotKeyAccountNotExists + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + + // make it pass the hotkey alpha check + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), + Error::::InsufficientLiquidity + ); + }); +} diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 50d409561d..1ae16d95c0 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -1,5 +1,6 @@ #![allow(clippy::unwrap_used)] +use approx::assert_abs_diff_eq; use frame_support::traits::Currency; use super::mock::*; @@ -535,11 +536,11 @@ fn test_burn_adjustment() { new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let tempo: u16 = 13; - let burn_cost: u64 = 1000; + let init_burn_cost: u64 = InitialMinBurn::get() + 10_000; let adjustment_interval = 1; let target_registrations_per_interval = 1; add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); + SubtensorModule::set_burn(netuid, init_burn_cost); SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. SubtensorModule::set_target_registrations_per_interval( @@ -550,7 +551,7 @@ fn test_burn_adjustment() { // Register key 1. 
let hotkey_account_id_1 = U256::from(1); let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_1), netuid, @@ -560,7 +561,7 @@ fn test_burn_adjustment() { // Register key 2. let hotkey_account_id_2 = U256::from(2); let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_2), netuid, @@ -571,8 +572,13 @@ fn test_burn_adjustment() { // Step the block and trigger the adjustment. step_block(1); - // Check the adjusted burn. - assert_eq!(SubtensorModule::get_burn_as_u64(netuid), 1500); + // Check the adjusted burn is above the initial min burn. 
+ assert!(SubtensorModule::get_burn_as_u64(netuid) > init_burn_cost); + assert_abs_diff_eq!( + SubtensorModule::get_burn_as_u64(netuid), + init_burn_cost.saturating_mul(3).saturating_div(2), // 1.5x + epsilon = 1000 + ); }); } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 33d686a604..1fc4e7b590 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -2311,7 +2311,10 @@ fn test_remove_stake_fee_realistic_values() { ); // Estimate fees - let expected_fee: f64 = current_price * alpha_divs as f64; + let mut expected_fee: f64 = current_price * alpha_divs as f64; + if expected_fee < alpha_to_unstake as f64 * 0.00005 { + expected_fee = alpha_to_unstake as f64 * 0.00005; + } // Remove stake to measure fee let balance_before = SubtensorModule::get_coldkey_balance(&coldkey); @@ -3903,7 +3906,7 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { let coldkey_account_id = U256::from(81337); let amount = 10_000_000_000; let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); - let fee = DefaultStakingFee::::get(); + let mut fee = DefaultStakingFee::::get(); register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Give it some $$$ in his coldkey balance @@ -3923,17 +3926,19 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { &coldkey_account_id, netuid, ); + let remove_amount = (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::(); assert_ok!(SubtensorModule::remove_stake( RuntimeOrigin::signed(coldkey_account_id), hotkey_account_id, netuid, - (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::() + remove_amount, )); // Check that all alpha was unstaked and all TAO balance was returned (less fees) + fee = fee + fee.max((remove_amount as f64 * 0.00005) as u64); assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), - amount - fee * 2, + amount - fee, epsilon = 10000, ); 
assert_eq!( diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs new file mode 100644 index 0000000000..4ceeaab897 --- /dev/null +++ b/pallets/subtensor/src/tests/subnet.rs @@ -0,0 +1,261 @@ +use super::mock::*; +use crate::*; +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; + +/*************************** + pub fn do_start_call() tests +*****************************/ + +#[test] +fn test_do_start_call_ok() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_not_existed_subnet() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey_account_id = U256::from(0); + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_do_start_call_fail_not_owner() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let 
tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let wrong_owner_account_id = U256::from(2); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + System::set_block_number(System::block_number() + DurationOfStartCall::get()); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(wrong_owner_account_id), + netuid + ), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_cannot_start_call_now() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::NeedWaitingMoreBlocksToStarCall + ); + }); +} + +#[test] +fn test_do_start_call_fail_for_set_again() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let 
hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::FirstEmissionBlockNumberAlreadySet + ); + }); +} + +#[test] +fn test_do_start_call_ok_with_same_block_number_after_coinbase() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + 
<::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) + ); + + step_block(tempo); + match FirstEmissionBlockNumber::::get(netuid) { + Some(new_emission_block_number) => { + assert_eq!(new_emission_block_number, block_number + 1) + } + None => assert!(FirstEmissionBlockNumber::::get(netuid).is_some()), + } + }); +} + +#[test] +fn test_register_network_min_burn_at_default() { + new_test_ext(1).execute_with(|| { + let sn_owner_coldkey = U256::from(0); + let sn_owner_hotkey = U256::from(1); + let cost = SubtensorModule::get_network_lock_cost(); + + // Give coldkey enough for lock + SubtensorModule::add_balance_to_coldkey_account(&sn_owner_coldkey, cost + 10_000_000_000); + + // Register network + assert_ok!(SubtensorModule::register_network( + <::RuntimeOrigin>::signed(sn_owner_coldkey), + sn_owner_hotkey + )); + // Get last events + let events = System::events(); + let min_burn_event = events + .iter() + .filter(|event| { + matches!( + event.event, + RuntimeEvent::SubtensorModule(Event::::NetworkAdded(..)) + ) + }) + .last(); + + let netuid = match min_burn_event.map(|event| event.event.clone()) { + Some(RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _))) => netuid, + _ => panic!("Expected NetworkAdded event"), + }; + + // Check min burn is set to default + assert_eq!(MinBurn::::get(netuid), InitialMinBurn::get()); + }); +} diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 178613fbb6..92a8a64048 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -68,6 +68,7 @@ fn test_replace_neuron() { Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -138,6 +139,76 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip, 0); 
assert_eq!(axon_info.port, 0); assert_eq!(axon_info.ip_type, 0); + + // Check bonds are cleared. + assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + }); +} + +#[test] +fn test_bonds_cleared_on_replace() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 1; + let tempo: u16 = 13; + let hotkey_account_id = U256::from(1); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 111111, + &hotkey_account_id, + ); + let coldkey_account_id = U256::from(1234); + + let new_hotkey_account_id = U256::from(2); + let _new_colkey_account_id = U256::from(12345); + + //add network + add_network(netuid, tempo, 0); + + // Register a neuron. + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work, + hotkey_account_id, + coldkey_account_id + )); + + // Get UID + let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id); + assert_ok!(neuron_uid); + let neuron_uid = neuron_uid.unwrap(); + + // set non-default bonds + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + + // Replace the neuron. + SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); + + // Check old hotkey is not registered on any network. + assert!(SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id).is_err()); + assert!(!SubtensorModule::is_hotkey_registered_on_any_network( + &hotkey_account_id + )); + + let curr_hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, neuron_uid); + assert_ok!(curr_hotkey); + assert_ne!(curr_hotkey.unwrap(), hotkey_account_id); + + // Check new hotkey is registered on the network. 
+ assert!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &new_hotkey_account_id).is_ok() + ); + assert!(SubtensorModule::is_hotkey_registered_on_any_network( + &new_hotkey_account_id + )); + assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); + + // Check bonds are cleared. + assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); }); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 25799a75c1..e9ead1812f 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -89,6 +89,8 @@ pub use sp_runtime::{Perbill, Permill}; use core::marker::PhantomData; +use scale_info::TypeInfo; + // Frontier use fp_rpc::TransactionStatus; use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; @@ -205,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 252, + spec_version: 257, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -918,12 +920,22 @@ impl pallet_registry::Config for Runtime { } parameter_types! 
{ - pub const MaxCommitFields: u32 = 1; + pub const MaxCommitFieldsInner: u32 = 1; pub const CommitmentInitialDeposit: Balance = 0; // Free pub const CommitmentFieldDeposit: Balance = 0; // Free pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } +#[subtensor_macros::freeze_struct("7c76bd954afbb54e")] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MaxCommitFields; +impl Get for MaxCommitFields { + fn get() -> u32 { + MaxCommitFieldsInner::get() + } +} + +#[subtensor_macros::freeze_struct("c39297f5eb97ee82")] pub struct AllowCommitments; impl CanCommit for AllowCommitments { #[cfg(not(feature = "runtime-benchmarks"))] @@ -948,6 +960,20 @@ impl pallet_commitments::Config for Runtime { type InitialDeposit = CommitmentInitialDeposit; type FieldDeposit = CommitmentFieldDeposit; type DefaultRateLimit = CommitmentRateLimit; + type TempoInterface = TempoInterface; +} + +pub struct TempoInterface; +impl pallet_commitments::GetTempoInterface for TempoInterface { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) + } +} + +impl pallet_commitments::GetTempoInterface for Runtime { + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) + } } #[cfg(not(feature = "fast-blocks"))] @@ -984,7 +1010,7 @@ parameter_types! { pub const SubtensorInitialMaxRegistrationsPerBlock: u16 = 1; pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; - pub const SubtensorInitialBondsPenalty: u16 = 0; + pub const SubtensorInitialBondsPenalty: u16 = u16::MAX; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take @@ -1018,6 +1044,11 @@ parameter_types! 
{ pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = if cfg!(feature = "fast-blocks") { + 10 // Only 10 blocks for fast blocks + } else { + 7 * 24 * 60 * 60 / 12 // 7 days + }; } impl pallet_subtensor::Config for Runtime { @@ -1082,6 +1113,7 @@ impl pallet_subtensor::Config for Runtime { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } use sp_runtime::BoundedVec;