From 6d1668d9d5444d79f3f7c0eea5eae0a583f14ab7 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Feb 2025 13:03:32 -0800 Subject: [PATCH 001/121] initial tlock commitments impl --- Cargo.lock | 5 ++ pallets/commitments/Cargo.toml | 7 +++ pallets/commitments/src/lib.rs | 88 ++++++++++++++++++++++++++++++-- pallets/commitments/src/mock.rs | 1 + pallets/commitments/src/types.rs | 58 +++++++++++++++++++++ pallets/drand/src/lib.rs | 2 +- 6 files changed, 157 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70dcd3d377..167febc2ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6087,11 +6087,15 @@ dependencies = [ name = "pallet-commitments" version = "4.0.0-dev" dependencies = [ + "ark-bls12-381", + "ark-serialize", "enumflags2", "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", + "pallet-drand", + "pallet-subtensor", "parity-scale-codec", "scale-info", "sp-core", @@ -6099,6 +6103,7 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "subtensor-macros", + "tle", ] [[package]] diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index 7fb22aa1fb..77a2397e8d 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -29,6 +29,13 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } enumflags2 = { workspace = true } +pallet-subtensor = { default-features = false, path = "../subtensor" } + +pallet-drand = { path = "../drand", default-features = false } +tle = { workspace = true, default-features = false } +ark-bls12-381 = { workspace = true, default-features = false } +ark-serialize = { workspace = true, default-features = false } + [dev-dependencies] sp-core = { workspace = true } sp-io = { workspace = true } diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index ba11dbe52a..1830eb2033 100644 --- 
a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -31,7 +31,7 @@ pub mod pallet { // Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config + pallet_drand::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -71,6 +71,22 @@ pub mod pallet { /// The account who: T::AccountId, }, + /// A timelock-encrypted commitment was set + TimelockCommitment { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + /// The drand round to reveal + reveal_round: u64, + }, + /// A timelock-encrypted commitment was auto-revealed + CommitmentRevealed { + /// The netuid of the commitment + netuid: u16, + /// The account + who: T::AccountId, + }, } #[pallet::error] @@ -117,13 +133,24 @@ pub mod pallet { BlockNumberFor, OptionQuery, >; + #[pallet::storage] + #[pallet::getter(fn timelock_commitment_of)] + pub(super) type TimelockCommitmentOf = StorageDoubleMap< + _, + Identity, + u16, // netuid + Twox64Concat, + T::AccountId, + TimelockCommitment>, + OptionQuery, + >; #[pallet::call] impl Pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - T::WeightInfo::set_commitment(), + ::WeightInfo::set_commitment(), DispatchClass::Operational, Pays::No ))] @@ -187,7 +214,7 @@ pub mod pallet { /// Sudo-set the commitment rate limit #[pallet::call_index(1)] #[pallet::weight(( - T::WeightInfo::set_rate_limit(), + ::WeightInfo::set_rate_limit(), DispatchClass::Operational, Pays::No ))] @@ -196,6 +223,61 @@ pub mod pallet { RateLimit::::set(rate_limit_blocks.into()); Ok(()) } + /// Set a timelock-encrypted commitment for a given netuid + #[pallet::call_index(2)] + #[pallet::weight(( + ::WeightInfo::set_commitment(), + DispatchClass::Operational, + Pays::No + ))] + pub fn 
set_timelock_commitment( + origin: OriginFor, + netuid: u16, + encrypted_commitment: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!( + T::CanCommit::can_commit(netuid, &who), + Error::::AccountNotAllowedCommit + ); + + let cur_block = >::block_number(); + if let Some(last_commit) = >::get(netuid, &who) { + ensure!( + cur_block >= last_commit.saturating_add(RateLimit::::get()), + Error::::CommitmentSetRateLimitExceeded + ); + } + + // Calculate reveal block + let last_drand_round = pallet_drand::LastStoredRound::::get(); + let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); // 4 blocks per round (12s blocktime / 3s round) + let rounds_since_last = reveal_round.saturating_sub(last_drand_round); + let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); + let blocks_to_reveal: BlockNumberFor = blocks_to_reveal.try_into().map_err(|_| "Block number conversion failed")?; + let reveal_block = cur_block.saturating_add(blocks_to_reveal); + + // Construct the TimelockCommitment + let timelock_commitment = TimelockCommitment { + encrypted_commitment, + reveal_round, + reveal_block, + }; + + // Store the timelock commitment + >::insert(netuid, &who, timelock_commitment.clone()); + >::insert(netuid, &who, cur_block); + + // Emit event with hash computed on-demand + Self::deposit_event(Event::TimelockCommitment { + netuid, + who, + reveal_round, + }); + + Ok(()) + } } } diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 8866e1c0d5..2d28a44c01 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -14,6 +14,7 @@ frame_support::construct_runtime!( { System: frame_system = 1, Commitments: pallet_commitments = 2, + SubtensorModule: pallet_subtensor = 3, } ); diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index bc0531ece4..5f5ada908b 100644 --- a/pallets/commitments/src/types.rs +++ 
b/pallets/commitments/src/types.rs @@ -31,6 +31,10 @@ use sp_runtime::{ use sp_std::{fmt::Debug, iter::once, prelude::*}; use subtensor_macros::freeze_struct; +use crate::Config; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use tle::curves::drand::TinyBLS381; + /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. /// @@ -295,6 +299,60 @@ pub struct CommitmentInfo> { pub fields: BoundedVec, } +/// Maximum size of the serialized timelock commitment in bytes +pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; + +/// Represents a timelock-encrypted commitment with reveal metadata +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] +pub struct TimelockCommitment { + /// The timelock-encrypted commitment data + pub encrypted_commitment: BoundedVec>, + /// The drand round number when this commitment can be revealed + pub reveal_round: u64, + /// The block number when the commitment should be revealed + pub reveal_block: BlockNumber, +} +/// Represents a revealed commitment after decryption +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] +pub struct RevealedCommitment, BlockNumber> { + /// The decrypted commitment info + pub info: CommitmentInfo, + /// The block it was revealed + pub revealed_block: BlockNumber, + /// The deposit held for the commitment + pub deposit: Balance, +} + +impl> TimelockCommitment { + /// Create a new TimelockCommitment from a TLECiphertext and reveal round + pub fn from_tle_ciphertext( + ciphertext: tle::tlock::TLECiphertext, + reveal_round: u64, + reveal_block: BlockNumber, + ) -> Result { + let mut encrypted_data = Vec::new(); + ciphertext + .serialize_compressed(&mut encrypted_data) + .map_err(|_| "Failed to serialize TLECiphertext")?; + + let bounded_encrypted = BoundedVec::try_from(encrypted_data) + .map_err(|_| "Encrypted commitment exceeds max size")?; + + Ok(TimelockCommitment 
{ + encrypted_commitment: bounded_encrypted, + reveal_round, + reveal_block, + }) + } + + /// Attempt to deserialize the encrypted commitment back into a TLECiphertext + pub fn to_tle_ciphertext(&self) -> Result, &'static str> { + let mut reader = &self.encrypted_commitment[..]; + tle::tlock::TLECiphertext::::deserialize_compressed(&mut reader) + .map_err(|_| "Failed to deserialize TLECiphertext") + } +} + /// Information concerning the identity of the controller of an account. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 40bf7ccb9b..d9da288212 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -219,7 +219,7 @@ pub mod pallet { pub type Pulses = StorageMap<_, Blake2_128Concat, RoundNumber, Pulse, OptionQuery>; #[pallet::storage] - pub(super) type LastStoredRound = StorageValue<_, RoundNumber, ValueQuery>; + pub type LastStoredRound = StorageValue<_, RoundNumber, ValueQuery>; /// Defines the block when next unsigned transaction will be accepted. 
/// From b9d69cac2b6b8acc9d5363aab781bd46d5fa073e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Feb 2025 13:23:44 -0800 Subject: [PATCH 002/121] Update tests.rs --- pallets/commitments/src/tests.rs | 132 ++++++++++++++++++++++--------- 1 file changed, 95 insertions(+), 37 deletions(-) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 15675d8ad8..5dcb3ba95c 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -2,7 +2,7 @@ use crate as pallet_commitments; use frame_support::derive_impl; -use frame_support::traits::ConstU64; +use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -10,7 +10,8 @@ use sp_runtime::{ }; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test @@ -18,39 +19,12 @@ frame_support::construct_runtime!( System: frame_system = 1, Balances: pallet_balances = 2, Commitments: pallet_commitments = 3, + Drand: pallet_drand = 4, } ); -#[allow(dead_code)] pub type AccountId = u64; -// The address format for describing accounts. -#[allow(dead_code)] -pub type Address = AccountId; - -// Balance of an account. -#[allow(dead_code)] -pub type Balance = u64; - -// An index to a block. 
-#[allow(dead_code)] -pub type BlockNumber = u64; - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -73,23 +47,107 @@ impl frame_system::Config for Test { type SystemWeightInfo = (); type SS58Prefix = ConstU16<42>; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; type Block = Block; - type Nonce = u64; + type Nonce = u32; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); } impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type WeightInfo = (); - type MaxFields = frame_support::traits::ConstU32<16>; + type MaxFields = ConstU32<16>; type CanCommit = (); - type FieldDeposit = frame_support::traits::ConstU64<0>; - type InitialDeposit = frame_support::traits::ConstU64<0>; - type DefaultRateLimit = frame_support::traits::ConstU64<0>; + type FieldDeposit = ConstU64<0>; + type InitialDeposit = ConstU64<0>; + type DefaultRateLimit = ConstU64<0>; +} + +impl pallet_drand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = 
pallet_drand::weights::SubstrateWeight; + type AuthorityId = test_crypto::TestAuthId; + type Verifier = pallet_drand::verifier::QuicknetVerifier; + type UnsignedPriority = ConstU64<{ 1 << 20 }>; + type HttpFetchTimeout = ConstU64<1_000>; +} + +pub mod test_crypto { + use sp_core::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::IdentifyAccount, + }; + + pub const KEY_TYPE: sp_runtime::KeyTypeId = sp_runtime::KeyTypeId(*b"test"); + + app_crypto!(sr25519, KEY_TYPE); + + pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = Sr25519Signature; + type GenericPublic = Sr25519Public; + } + + impl IdentifyAccount for Public { + type AccountId = u64; + + fn into_account(self) -> u64 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(self.as_ref()); + u64::from_le_bytes(bytes[..8].try_into().unwrap()) + } + } +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = test_crypto::Public; + type Signature = test_crypto::Signature; +} + +impl frame_system::offchain::CreateSignedTransaction> for Test { + fn create_transaction>( + call: RuntimeCall, + _public: Self::Public, + account: Self::AccountId, + _nonce: u32, + ) -> Option<( + RuntimeCall, + ::SignaturePayload, + )> { + // Create a dummy sr25519 signature from a raw byte array + let dummy_raw = [0u8; 64]; + let dummy_signature = sp_core::sr25519::Signature::from(dummy_raw); + let signature = test_crypto::Signature::from(dummy_signature); + Some((call, (account, signature, ()))) + } +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; } -// // Build genesis storage according to the mock runtime. 
// pub fn new_test_ext() -> sp_io::TestExternalities { // let t = frame_system::GenesisConfig::::default() // .build_storage() From 6938c1ae07a426881c50439e4fab0eb28c43b1a8 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Feb 2025 13:23:50 -0800 Subject: [PATCH 003/121] fmt --- pallets/commitments/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 1830eb2033..244baa93b4 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -81,7 +81,7 @@ pub mod pallet { reveal_round: u64, }, /// A timelock-encrypted commitment was auto-revealed - CommitmentRevealed { + CommitmentRevealed { /// The netuid of the commitment netuid: u16, /// The account @@ -255,7 +255,9 @@ pub mod pallet { let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); // 4 blocks per round (12s blocktime / 3s round) let rounds_since_last = reveal_round.saturating_sub(last_drand_round); let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); - let blocks_to_reveal: BlockNumberFor = blocks_to_reveal.try_into().map_err(|_| "Block number conversion failed")?; + let blocks_to_reveal: BlockNumberFor = blocks_to_reveal + .try_into() + .map_err(|_| "Block number conversion failed")?; let reveal_block = cur_block.saturating_add(blocks_to_reveal); // Construct the TimelockCommitment From 3f3c0e0507fe508a157013bea0d913c78695080b Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 27 Feb 2025 15:51:27 -0800 Subject: [PATCH 004/121] refactor timelock commitments --- pallets/commitments/src/lib.rs | 160 ++++++++++++++----------------- pallets/commitments/src/tests.rs | 25 ++++- pallets/commitments/src/types.rs | 129 ++++++++++++++++++------- runtime/src/lib.rs | 10 +- 4 files changed, 200 insertions(+), 124 deletions(-) diff --git a/pallets/commitments/src/lib.rs 
b/pallets/commitments/src/lib.rs index 244baa93b4..b30c17cb16 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -46,7 +46,7 @@ pub mod pallet { /// The maximum number of additional fields that can be added to a commitment #[pallet::constant] - type MaxFields: Get; + type MaxFields: Get + TypeInfo + 'static; /// The amount held on deposit for a registered identity #[pallet::constant] @@ -141,7 +141,7 @@ pub mod pallet { u16, // netuid Twox64Concat, T::AccountId, - TimelockCommitment>, + CommitmentState, T::MaxFields, BlockNumberFor>, OptionQuery, >; @@ -179,36 +179,81 @@ pub mod pallet { ); } - let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); - let mut id = match >::get(netuid, &who) { - Some(mut id) => { - id.info = *info; - id.block = cur_block; - id + let is_timelock = info.fields.iter().any(|data| data.is_timelock_encrypted()); + + if !is_timelock { + let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); + let mut id = match >::get(netuid, &who) { + Some(mut id) => { + id.info = *info; + id.block = cur_block; + id + } + None => Registration { + info: *info, + block: cur_block, + deposit: Zero::zero(), + }, + }; + + let old_deposit = id.deposit; + id.deposit = T::InitialDeposit::get().saturating_add(fd); + if id.deposit > old_deposit { + T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; + } + if old_deposit > id.deposit { + let err_amount = + T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); + debug_assert!(err_amount.is_zero()); } - None => Registration { - info: *info, - block: cur_block, - deposit: Zero::zero(), - }, - }; - - let old_deposit = id.deposit; - id.deposit = T::InitialDeposit::get().saturating_add(fd); - if id.deposit > old_deposit { - T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; - } - if old_deposit > id.deposit { - let err_amount = - T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); - 
debug_assert!(err_amount.is_zero()); - } - >::insert(netuid, &who, id); - >::insert(netuid, &who, cur_block); - Self::deposit_event(Event::Commitment { netuid, who }); + >::insert(netuid, &who, id); + >::insert(netuid, &who, cur_block); + Self::deposit_event(Event::Commitment { netuid, who }); - Ok(()) + Ok(()) + } else { + ensure!( + info.fields.len() == 1, + Error::::TooManyFieldsInCommitmentInfo, + ); + + if let Data::TimelockEncrypted { + encrypted, + reveal_round, + } = &info.fields[0] + { + // Calculate reveal block + let last_drand_round = pallet_drand::LastStoredRound::::get(); + let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); + let rounds_since_last = reveal_round.saturating_sub(last_drand_round); + let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); + let blocks_to_reveal: BlockNumberFor = blocks_to_reveal + .try_into() + .map_err(|_| "Block number conversion failed")?; + let reveal_block = cur_block.saturating_add(blocks_to_reveal); + + // Construct CommitmentState for timelock commitment + let commitment_state = CommitmentState { + encrypted_commitment: encrypted.clone(), + reveal_round: *reveal_round, + reveal_block, + revealed: None, + }; + + // Store in TimelockCommitmentOf + >::insert(netuid, &who, commitment_state); + >::insert(netuid, &who, cur_block); + + // Emit timelock-specific event + Self::deposit_event(Event::TimelockCommitment { + netuid, + who, + reveal_round: *reveal_round, + }); + } + Ok(()) + } } /// Sudo-set the commitment rate limit @@ -223,63 +268,6 @@ pub mod pallet { RateLimit::::set(rate_limit_blocks.into()); Ok(()) } - /// Set a timelock-encrypted commitment for a given netuid - #[pallet::call_index(2)] - #[pallet::weight(( - ::WeightInfo::set_commitment(), - DispatchClass::Operational, - Pays::No - ))] - pub fn set_timelock_commitment( - origin: OriginFor, - netuid: u16, - encrypted_commitment: BoundedVec>, - reveal_round: u64, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - 
ensure!( - T::CanCommit::can_commit(netuid, &who), - Error::::AccountNotAllowedCommit - ); - - let cur_block = >::block_number(); - if let Some(last_commit) = >::get(netuid, &who) { - ensure!( - cur_block >= last_commit.saturating_add(RateLimit::::get()), - Error::::CommitmentSetRateLimitExceeded - ); - } - - // Calculate reveal block - let last_drand_round = pallet_drand::LastStoredRound::::get(); - let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); // 4 blocks per round (12s blocktime / 3s round) - let rounds_since_last = reveal_round.saturating_sub(last_drand_round); - let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); - let blocks_to_reveal: BlockNumberFor = blocks_to_reveal - .try_into() - .map_err(|_| "Block number conversion failed")?; - let reveal_block = cur_block.saturating_add(blocks_to_reveal); - - // Construct the TimelockCommitment - let timelock_commitment = TimelockCommitment { - encrypted_commitment, - reveal_round, - reveal_block, - }; - - // Store the timelock commitment - >::insert(netuid, &who, timelock_commitment.clone()); - >::insert(netuid, &who, cur_block); - - // Emit event with hash computed on-demand - Self::deposit_event(Event::TimelockCommitment { - netuid, - who, - reveal_round, - }); - - Ok(()) - } } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 5dcb3ba95c..0742271114 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1,8 +1,11 @@ #![allow(non_camel_case_types)] use crate as pallet_commitments; -use frame_support::derive_impl; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + pallet_prelude::{Get, TypeInfo}, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -67,11 +70,27 @@ impl pallet_balances::Config for Test { type MaxFreezes = (); } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct TestMaxFields; +impl Get for TestMaxFields { + fn 
get() -> u32 { + 16 + } +} +impl TypeInfo for TestMaxFields { + type Identity = Self; + fn type_info() -> scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("TestMaxFields", module_path!())) + .composite(scale_info::build::Fields::unit()) + } +} + impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type WeightInfo = (); - type MaxFields = ConstU32<16>; + type MaxFields = TestMaxFields; type CanCommit = (); type FieldDeposit = ConstU64<0>; type InitialDeposit = ConstU64<0>; diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 5f5ada908b..40b997589c 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -57,12 +57,22 @@ pub enum Data { /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. ShaThree256([u8; 32]), + /// A timelock-encrypted commitment with a reveal round. + TimelockEncrypted { + encrypted: BoundedVec>, + reveal_round: u64, + }, } impl Data { pub fn is_none(&self) -> bool { self == &Data::None } + + /// Check if this is a timelock-encrypted commitment. + pub fn is_timelock_encrypted(&self) -> bool { + matches!(self, Data::TimelockEncrypted { .. 
}) + } } impl Decode for Data { @@ -81,6 +91,15 @@ impl Decode for Data { 131 => Data::Sha256(<[u8; 32]>::decode(input)?), 132 => Data::Keccak256(<[u8; 32]>::decode(input)?), 133 => Data::ShaThree256(<[u8; 32]>::decode(input)?), + 134 => { + let encrypted = + BoundedVec::>::decode(input)?; + let reveal_round = u64::decode(input)?; + Data::TimelockEncrypted { + encrypted, + reveal_round, + } + } _ => return Err(codec::Error::from("invalid leading byte")), }) } @@ -100,6 +119,15 @@ impl Encode for Data { Data::Sha256(h) => once(131).chain(h.iter().cloned()).collect(), Data::Keccak256(h) => once(132).chain(h.iter().cloned()).collect(), Data::ShaThree256(h) => once(133).chain(h.iter().cloned()).collect(), + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let mut r = vec![134]; + r.extend_from_slice(&encrypted.encode()); + r.extend_from_slice(&reveal_round.encode()); + r + } } } } @@ -274,6 +302,17 @@ impl TypeInfo for Data { .variant("ShaThree256", |v| { v.index(133) .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("TimelockEncrypted", |v| { + v.index(134).fields( + Fields::named() + .field(|f| { + f.name("encrypted") + .ty::>>( + ) + }) + .field(|f| f.name("reveal_round").ty::()), + ) }); Type::builder() @@ -302,29 +341,26 @@ pub struct CommitmentInfo> { /// Maximum size of the serialized timelock commitment in bytes pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; -/// Represents a timelock-encrypted commitment with reveal metadata +/// Represents a commitment that can be either unrevealed (timelock-encrypted) or revealed. 
#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] -pub struct TimelockCommitment { - /// The timelock-encrypted commitment data +pub struct CommitmentState, BlockNumber> { pub encrypted_commitment: BoundedVec>, - /// The drand round number when this commitment can be revealed pub reveal_round: u64, - /// The block number when the commitment should be revealed pub reveal_block: BlockNumber, + pub revealed: Option>, } -/// Represents a revealed commitment after decryption + +/// Contains the decrypted data of a revealed commitment. #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] -pub struct RevealedCommitment, BlockNumber> { - /// The decrypted commitment info +pub struct RevealedData, BlockNumber> { pub info: CommitmentInfo, - /// The block it was revealed pub revealed_block: BlockNumber, - /// The deposit held for the commitment pub deposit: Balance, } -impl> TimelockCommitment { - /// Create a new TimelockCommitment from a TLECiphertext and reveal round +impl, BlockNumber: Clone + From> + CommitmentState +{ pub fn from_tle_ciphertext( ciphertext: tle::tlock::TLECiphertext, reveal_round: u64, @@ -334,18 +370,16 @@ impl> TimelockCommitment { ciphertext .serialize_compressed(&mut encrypted_data) .map_err(|_| "Failed to serialize TLECiphertext")?; - let bounded_encrypted = BoundedVec::try_from(encrypted_data) .map_err(|_| "Encrypted commitment exceeds max size")?; - - Ok(TimelockCommitment { + Ok(CommitmentState { encrypted_commitment: bounded_encrypted, reveal_round, reveal_block, + revealed: None, }) } - /// Attempt to deserialize the encrypted commitment back into a TLECiphertext pub fn to_tle_ciphertext(&self) -> Result, &'static str> { let mut reader = &self.encrypted_commitment[..]; tle::tlock::TLECiphertext::::deserialize_compressed(&mut reader) @@ -424,6 +458,7 @@ mod tests { Data::Keccak256(_) => "Keccak256".to_string(), Data::ShaThree256(_) => "ShaThree256".to_string(), Data::Raw(bytes) => format!("Raw{}", bytes.len()), + 
Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), }; if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { let variant = variant @@ -432,25 +467,45 @@ mod tests { .find(|v| v.name == variant_name) .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); - let field_arr_len = variant - .fields - .first() - .and_then(|f| registry.resolve(f.ty.id)) - .map(|ty| { - if let scale_info::TypeDef::Array(arr) = &ty.type_def { - arr.len - } else { - panic!("Should be an array type") - } - }) - .unwrap_or(0); - let encoded = data.encode(); assert_eq!(encoded[0], variant.index); - assert_eq!(encoded.len() as u32 - 1, field_arr_len); + + // For variants with fields, check the encoded length matches expected field lengths + if !variant.fields.is_empty() { + let expected_len = match data { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u32, + Data::BlakeTwo256(_) + | Data::Sha256(_) + | Data::Keccak256(_) + | Data::ShaThree256(_) => 32, + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + // Calculate length: encrypted (length prefixed) + reveal_round (u64) + let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix + let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes + encrypted_len + reveal_round_len + } + }; + assert_eq!( + encoded.len() as u32 - 1, // Subtract variant byte + expected_len, + "Encoded length mismatch for variant {}", + variant_name + ); + } else { + assert_eq!( + encoded.len() as u32 - 1, + 0, + "Expected no fields for {}", + variant_name + ); + } } else { - panic!("Should be a variant type") - }; + panic!("Should be a variant type"); + } }; let mut data = vec![ @@ -461,11 +516,17 @@ mod tests { Data::ShaThree256(Default::default()), ]; - // A Raw instance for all possible sizes of the Raw data + // Add Raw instances for all possible sizes for n in 0..128 { - data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())) + 
data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())); } + // Add a TimelockEncrypted instance + data.push(Data::TimelockEncrypted { + encrypted: vec![0u8; 64].try_into().unwrap(), // Example encrypted data + reveal_round: 12345, + }); + for d in data.iter() { check_type_info(d); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 11ca6d6d03..fba6d59849 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -990,12 +990,20 @@ impl pallet_registry::Config for Runtime { } parameter_types! { - pub const MaxCommitFields: u32 = 1; + pub const MaxCommitFieldsInner: u32 = 1; pub const CommitmentInitialDeposit: Balance = 0; // Free pub const CommitmentFieldDeposit: Balance = 0; // Free pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MaxCommitFields; +impl Get for MaxCommitFields { + fn get() -> u32 { + MaxCommitFieldsInner::get() + } +} + pub struct AllowCommitments; impl CanCommit for AllowCommitments { #[cfg(not(feature = "runtime-benchmarks"))] From 3c6bf20367928471b46a61880f1d8879e04b3e72 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 28 Feb 2025 16:10:26 -0800 Subject: [PATCH 005/121] add reveal_timelocked_commitments to block_step --- Cargo.lock | 6 +- pallets/admin-utils/Cargo.toml | 1 + pallets/admin-utils/src/tests/mock.rs | 37 ++- pallets/commitments/Cargo.toml | 5 +- pallets/commitments/src/lib.rs | 269 +++++++++++++------ pallets/commitments/src/types.rs | 42 --- pallets/subtensor/Cargo.toml | 2 + pallets/subtensor/src/coinbase/block_step.rs | 10 + pallets/subtensor/src/macros/config.rs | 3 + pallets/subtensor/src/tests/mock.rs | 36 +++ pallets/subtensor/src/utils/misc.rs | 6 + runtime/src/lib.rs | 1 + 12 files changed, 293 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 167febc2ab..cf24920518 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5989,6 
+5989,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", + "pallet-commitments", "pallet-drand", "pallet-evm-chain-id", "pallet-grandpa", @@ -6087,15 +6088,14 @@ dependencies = [ name = "pallet-commitments" version = "4.0.0-dev" dependencies = [ - "ark-bls12-381", "ark-serialize", "enumflags2", "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "pallet-drand", - "pallet-subtensor", "parity-scale-codec", "scale-info", "sp-core", @@ -6104,6 +6104,7 @@ dependencies = [ "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "subtensor-macros", "tle", + "w3f-bls", ] [[package]] @@ -6424,6 +6425,7 @@ dependencies = [ "num-traits", "pallet-balances", "pallet-collective", + "pallet-commitments", "pallet-drand", "pallet-membership", "pallet-preimage", diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index b3c1410cca..d0c4248aaf 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -32,6 +32,7 @@ substrate-fixed = { workspace = true } pallet-evm-chain-id = { workspace = true } pallet-drand = { workspace = true, default-features = false } sp-consensus-grandpa = { workspace = true } +pallet-commitments = { default-features = false, path = "../commitments" } [dev-dependencies] sp-core = { workspace = true } diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 40e29e54dd..b23e442705 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -10,7 +10,7 @@ use frame_system::{EnsureNever, EnsureRoot, limits}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; -use sp_core::{ConstU64, H256}; +use sp_core::{ConstU64, H256, Encode, Decode, Get}; use sp_runtime::{ BuildStorage, KeyTypeId, Perbill, testing::TestXt, @@ -18,6 +18,7 @@ use sp_runtime::{ }; use 
sp_std::cmp::Ordering; use sp_weights::Weight; +use scale_info::TypeInfo; type Block = frame_system::mocking::MockBlock; @@ -32,6 +33,7 @@ frame_support::construct_runtime!( Drand: pallet_drand::{Pallet, Call, Storage, Event} = 6, Grandpa: pallet_grandpa = 7, EVMChainId: pallet_evm_chain_id = 8, + Commitments: pallet_commitments::{Pallet, Call, Storage, Event} = 9, } ); @@ -200,6 +202,7 @@ impl pallet_subtensor::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; + type CommitmentRuntime = Test; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -376,6 +379,38 @@ where type OverarchingCall = RuntimeCall; } +parameter_types! { + pub const MaxCommitFieldsInner: u32 = 1; + pub const CommitmentInitialDeposit: Balance = 0; + pub const CommitmentFieldDeposit: Balance = 0; + pub const CommitmentRateLimit: BlockNumber = 100; +} + +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MaxCommitFields; +impl Get for MaxCommitFields { + fn get() -> u32 { + MaxCommitFieldsInner::get() + } +} + +pub struct AllowCommitments; +impl pallet_commitments::CanCommit for AllowCommitments { + fn can_commit(_netuid: u16, _address: &AccountId) -> bool { + true + } +} + +impl pallet_commitments::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type WeightInfo = pallet_commitments::weights::SubstrateWeight; + type CanCommit = AllowCommitments; + type MaxFields = MaxCommitFields; + type InitialDeposit = CommitmentInitialDeposit; + type FieldDeposit = CommitmentFieldDeposit; + type DefaultRateLimit = CommitmentRateLimit; +} // Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { sp_tracing::try_init_simple(); diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index 77a2397e8d..31b4001c1a 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -29,12 +29,11 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } enumflags2 = { workspace = true } -pallet-subtensor = { default-features = false, path = "../subtensor" } - pallet-drand = { path = "../drand", default-features = false } tle = { workspace = true, default-features = false } -ark-bls12-381 = { workspace = true, default-features = false } ark-serialize = { workspace = true, default-features = false } +w3f-bls = { workspace = true, default-features = false } +log = { workspace = true } [dev-dependencies] sp-core = { workspace = true } diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index b30c17cb16..86738e5178 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -12,9 +12,17 @@ use subtensor_macros::freeze_struct; pub use types::*; pub use weights::WeightInfo; -use frame_support::traits::Currency; +use ark_serialize::CanonicalDeserialize; +use frame_support::{BoundedVec, traits::Currency}; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{Saturating, traits::Zero}; -use sp_std::boxed::Box; +use sp_std::{boxed::Box, vec::Vec}; +use tle::{ + curves::drand::TinyBLS381, + stream_ciphers::AESGCMStreamCipherProvider, + tlock::{TLECiphertext, tld}, +}; +use w3f_bls::EngineBLS; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -134,14 +142,14 @@ pub mod pallet { OptionQuery, >; #[pallet::storage] - #[pallet::getter(fn timelock_commitment_of)] - pub(super) type TimelockCommitmentOf = StorageDoubleMap< + #[pallet::getter(fn revealed_commitments)] + pub(super) type RevealedCommitments = StorageDoubleMap< _, Identity, - u16, // netuid + u16, Twox64Concat, T::AccountId, - CommitmentState, 
T::MaxFields, BlockNumberFor>, + RevealedData, T::MaxFields, BlockNumberFor>, OptionQuery, >; @@ -179,81 +187,49 @@ pub mod pallet { ); } - let is_timelock = info.fields.iter().any(|data| data.is_timelock_encrypted()); - - if !is_timelock { - let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); - let mut id = match >::get(netuid, &who) { - Some(mut id) => { - id.info = *info; - id.block = cur_block; - id - } - None => Registration { - info: *info, - block: cur_block, - deposit: Zero::zero(), - }, - }; - - let old_deposit = id.deposit; - id.deposit = T::InitialDeposit::get().saturating_add(fd); - if id.deposit > old_deposit { - T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; - } - if old_deposit > id.deposit { - let err_amount = - T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); - debug_assert!(err_amount.is_zero()); + let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); + let mut id = match >::get(netuid, &who) { + Some(mut id) => { + id.info = *info.clone(); + id.block = cur_block; + id } + None => Registration { + info: *info.clone(), + block: cur_block, + deposit: Zero::zero(), + }, + }; + + let old_deposit = id.deposit; + id.deposit = T::InitialDeposit::get().saturating_add(fd); + if id.deposit > old_deposit { + T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; + } + if old_deposit > id.deposit { + let err_amount = + T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); + debug_assert!(err_amount.is_zero()); + } - >::insert(netuid, &who, id); - >::insert(netuid, &who, cur_block); - Self::deposit_event(Event::Commitment { netuid, who }); - - Ok(()) + >::insert(netuid, &who, id); + >::insert(netuid, &who, cur_block); + + if let Some(Data::TimelockEncrypted { reveal_round, .. }) = info + .fields + .iter() + .find(|data| matches!(data, Data::TimelockEncrypted { .. 
})) + { + Self::deposit_event(Event::TimelockCommitment { + netuid, + who, + reveal_round: *reveal_round, + }); } else { - ensure!( - info.fields.len() == 1, - Error::::TooManyFieldsInCommitmentInfo, - ); - - if let Data::TimelockEncrypted { - encrypted, - reveal_round, - } = &info.fields[0] - { - // Calculate reveal block - let last_drand_round = pallet_drand::LastStoredRound::::get(); - let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); - let rounds_since_last = reveal_round.saturating_sub(last_drand_round); - let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); - let blocks_to_reveal: BlockNumberFor = blocks_to_reveal - .try_into() - .map_err(|_| "Block number conversion failed")?; - let reveal_block = cur_block.saturating_add(blocks_to_reveal); - - // Construct CommitmentState for timelock commitment - let commitment_state = CommitmentState { - encrypted_commitment: encrypted.clone(), - reveal_round: *reveal_round, - reveal_block, - revealed: None, - }; - - // Store in TimelockCommitmentOf - >::insert(netuid, &who, commitment_state); - >::insert(netuid, &who, cur_block); - - // Emit timelock-specific event - Self::deposit_event(Event::TimelockCommitment { - netuid, - who, - reveal_round: *reveal_round, - }); - } - Ok(()) + Self::deposit_event(Event::Commitment { netuid, who }); } + + Ok(()) } /// Sudo-set the commitment rate limit @@ -400,3 +376,142 @@ where Ok(()) } } + +impl Pallet { + pub fn reveal_timelocked_commitments(current_block: u64) -> DispatchResult { + let current_block = current_block + .try_into() + .map_err(|_| "Failed to convert u64 to BlockNumberFor")?; + + for (netuid, who, mut registration) in >::iter() { + if let Some(Data::TimelockEncrypted { + encrypted, + reveal_round, + .. + }) = registration + .info + .fields + .clone() + .iter() + .find(|data| matches!(data, Data::TimelockEncrypted { .. 
})) + { + // Calculate reveal block + let reveal_block = Self::calculate_reveal_block(*reveal_round, registration.block)?; + + // Check if the current block has reached or exceeded the reveal block + if current_block >= reveal_block { + // Deserialize the encrypted commitment into a TLECiphertext + let reader = &mut &encrypted[..]; + let commit = TLECiphertext::::deserialize_compressed(reader) + .map_err(|e| { + log::warn!("Failed to deserialize TLECiphertext for {:?}: {:?}", who, e) + }) + .ok(); + + let commit = match commit { + Some(c) => c, + None => continue, + }; + + // Get the drand pulse for the reveal round + let pulse = match pallet_drand::Pulses::::get(*reveal_round) { + Some(p) => p, + None => { + log::warn!( + "Failed to reveal commit for subnet {} by {:?}: missing drand round {}", + netuid, + who, + reveal_round + ); + continue; + } + }; + + // Prepare the signature bytes + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + let sig_reader = &mut &signature_bytes[..]; + let sig = ::SignatureGroup::deserialize_compressed( + sig_reader, + ) + .map_err(|e| { + log::warn!( + "Failed to deserialize drand signature for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let sig = match sig { + Some(s) => s, + None => continue, + }; + + // Decrypt the timelock commitment + let decrypted_bytes: Vec = + tld::(commit, sig) + .map_err(|e| { + log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) + }) + .ok() + .unwrap_or_default(); + + if decrypted_bytes.is_empty() { + continue; + } + + // Decode the decrypted bytes into CommitmentInfo (assuming it’s SCALE-encoded CommitmentInfo) + let mut reader = &decrypted_bytes[..]; + let revealed_info: CommitmentInfo = Decode::decode(&mut reader) + .map_err(|e| { + log::warn!("Failed to decode decrypted data for {:?}: {:?}", who, e) + }) + .ok() + .unwrap_or_else(|| CommitmentInfo { + fields: BoundedVec::default(), + }); + + // Create RevealedData for storage + let 
revealed_data = RevealedData { + info: revealed_info, + revealed_block: current_block, + deposit: registration.deposit, + }; + + // Store the revealed data in RevealedCommitments + >::insert(netuid, &who, revealed_data); + + // Remove the TimelockEncrypted field from the original commitment + let filtered_fields: Vec = registration.info.fields.into_iter() + .filter(|data| !matches!(data, Data::TimelockEncrypted { reveal_round: r, .. } if r == reveal_round)) + .collect(); + registration.info.fields = BoundedVec::try_from(filtered_fields) + .map_err(|_| "Failed to filter timelock fields")?; + + Self::deposit_event(Event::CommitmentRevealed { netuid, who }); + } + } + } + + Ok(()) + } + + fn calculate_reveal_block( + reveal_round: u64, + commit_block: BlockNumberFor, + ) -> Result, &'static str> { + let last_drand_round = pallet_drand::LastStoredRound::::get(); + let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); // 4 blocks per round (12s blocktime / 3s round) + let rounds_since_last = reveal_round.saturating_sub(last_drand_round); + let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); + let reveal_block = commit_block.saturating_add( + blocks_to_reveal + .try_into() + .map_err(|_| "Block number conversion failed")?, + ); + Ok(reveal_block) + } +} diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 40b997589c..7fe76a7194 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -31,10 +31,6 @@ use sp_runtime::{ use sp_std::{fmt::Debug, iter::once, prelude::*}; use subtensor_macros::freeze_struct; -use crate::Config; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use tle::curves::drand::TinyBLS381; - /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. 
/// @@ -341,15 +337,6 @@ pub struct CommitmentInfo> { /// Maximum size of the serialized timelock commitment in bytes pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; -/// Represents a commitment that can be either unrevealed (timelock-encrypted) or revealed. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] -pub struct CommitmentState, BlockNumber> { - pub encrypted_commitment: BoundedVec>, - pub reveal_round: u64, - pub reveal_block: BlockNumber, - pub revealed: Option>, -} - /// Contains the decrypted data of a revealed commitment. #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] pub struct RevealedData, BlockNumber> { @@ -358,35 +345,6 @@ pub struct RevealedData, BlockNumber> { pub deposit: Balance, } -impl, BlockNumber: Clone + From> - CommitmentState -{ - pub fn from_tle_ciphertext( - ciphertext: tle::tlock::TLECiphertext, - reveal_round: u64, - reveal_block: BlockNumber, - ) -> Result { - let mut encrypted_data = Vec::new(); - ciphertext - .serialize_compressed(&mut encrypted_data) - .map_err(|_| "Failed to serialize TLECiphertext")?; - let bounded_encrypted = BoundedVec::try_from(encrypted_data) - .map_err(|_| "Encrypted commitment exceeds max size")?; - Ok(CommitmentState { - encrypted_commitment: bounded_encrypted, - reveal_round, - reveal_block, - revealed: None, - }) - } - - pub fn to_tle_ciphertext(&self) -> Result, &'static str> { - let mut reader = &self.encrypted_commitment[..]; - tle::tlock::TLECiphertext::::deserialize_compressed(&mut reader) - .map_err(|_| "Failed to deserialize TLECiphertext") - } -} - /// Information concerning the identity of the controller of an account. 
/// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index f240245c47..34c7ee50d5 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -56,6 +56,8 @@ w3f-bls = { workspace = true, default-features = false } sha2 = { workspace = true } rand_chacha = { workspace = true } +pallet-commitments = { default-features = false, path = "../commitments" } + [dev-dependencies] pallet-balances = { workspace = true, features = ["std"] } pallet-scheduler = { workspace = true } diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 669f8e09da..6943a02657 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -18,6 +18,16 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); + + // --- 5. Unveil all matured timelocked entries + if let Err(e) = Self::reveal_timelocked_commitments(block_number) { + log::debug!( + "Failed to unveil matured commitments on block {} due to error: {:?}", + block_number, + e + ); + } + // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 72a4c5f0d3..e7ed269f99 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -47,6 +47,9 @@ mod config { /// the preimage to store the call data. 
type Preimages: QueryPreimage + StorePreimage; + /// The commitment pallet's runtime + type CommitmentRuntime: pallet_commitments::Config; + /// ================================= /// ==== Initial Value Constants ==== /// ================================= diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index aae6aa60ef..2f2eb5d8a4 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -38,6 +38,7 @@ frame_support::construct_runtime!( Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 9, Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 10, Drand: pallet_drand::{Pallet, Call, Storage, Event} = 11, + Commitments: pallet_commitments::{Pallet, Call, Storage, Event} = 12, } ); @@ -409,6 +410,15 @@ impl crate::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; + type CommitmentRuntime = Test; +} + + +parameter_types! 
{ + pub const MaxCommitFieldsInner: u32 = 1; + pub const CommitmentInitialDeposit: Balance = 0; + pub const CommitmentFieldDeposit: Balance = 0; + pub const CommitmentRateLimit: BlockNumber = 100; } pub struct OriginPrivilegeCmp; @@ -532,6 +542,32 @@ impl frame_system::offchain::CreateSignedTransaction> f } } +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MaxCommitFields; +impl Get for MaxCommitFields { + fn get() -> u32 { + MaxCommitFieldsInner::get() + } +} + +pub struct AllowCommitments; +impl pallet_commitments::CanCommit for AllowCommitments { + fn can_commit(_netuid: u16, _address: &AccountId) -> bool { + true + } +} + +impl pallet_commitments::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type WeightInfo = pallet_commitments::weights::SubstrateWeight; + type CanCommit = AllowCommitments; + type MaxFields = MaxCommitFields; + type InitialDeposit = CommitmentInitialDeposit; + type FieldDeposit = CommitmentFieldDeposit; + type DefaultRateLimit = CommitmentRateLimit; +} + #[allow(dead_code)] // Build genesis storage according to the mock runtime. 
pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities { diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index bd093a76b5..8615c8c7cd 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -743,4 +743,10 @@ impl Pallet { DissolveNetworkScheduleDuration::::set(duration); Self::deposit_event(Event::DissolveNetworkScheduleDurationSet(duration)); } + + pub fn reveal_timelocked_commitments(block_number: u64) -> DispatchResult { + pallet_commitments::Pallet::::reveal_timelocked_commitments( + block_number, + ) + } } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fba6d59849..8c2fbd43bf 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1162,6 +1162,7 @@ impl pallet_subtensor::Config for Runtime { type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; + type CommitmentRuntime = Runtime; } use sp_runtime::BoundedVec; From c7bcf7d9f0100ac051ab15733caffdca02d0da3f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 3 Mar 2025 10:53:29 -0800 Subject: [PATCH 006/121] add basic commitments tests --- pallets/admin-utils/Cargo.toml | 3 + pallets/admin-utils/src/tests/mock.rs | 4 +- pallets/commitments/Cargo.toml | 13 ++- pallets/commitments/src/tests.rs | 26 +++-- pallets/commitments/src/types.rs | 161 +++++++++++++++++++++++++- pallets/subtensor/Cargo.toml | 9 +- pallets/subtensor/src/tests/mock.rs | 3 +- 7 files changed, 199 insertions(+), 20 deletions(-) diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index d0c4248aaf..d8a413977f 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -68,6 +68,7 @@ std = [ "sp-tracing/std", "sp-weights/std", "substrate-fixed/std", + "pallet-commitments/std" ] runtime-benchmarks = [ "
"frame-benchmarking/runtime-benchmarks", @@ -79,6 +80,7 @@ runtime-benchmarks = [ "pallet-scheduler/runtime-benchmarks", "pallet-subtensor/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-commitments/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", @@ -90,4 +92,5 @@ try-runtime = [ "pallet-scheduler/try-runtime", "pallet-subtensor/try-runtime", "sp-runtime/try-runtime", + "pallet-commitments/try-runtime" ] diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index b23e442705..aa7d8cd1da 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -7,10 +7,11 @@ use frame_support::{ }; use frame_system as system; use frame_system::{EnsureNever, EnsureRoot, limits}; +use scale_info::TypeInfo; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; -use sp_core::{ConstU64, H256, Encode, Decode, Get}; +use sp_core::{ConstU64, Decode, Encode, Get, H256}; use sp_runtime::{ BuildStorage, KeyTypeId, Perbill, testing::TestXt, @@ -18,7 +19,6 @@ use sp_runtime::{ }; use sp_std::cmp::Ordering; use sp_weights::Weight; -use scale_info::TypeInfo; type Block = frame_system::mocking::MockBlock; diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index 31b4001c1a..f8cb5e2cf7 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -53,18 +53,25 @@ std = [ "enumflags2/std", "pallet-balances/std", "sp-core/std", - "sp-io/std" + "sp-io/std", + "ark-serialize/std", + "log/std", + "pallet-drand/std", + "tle/std", + "w3f-bls/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-balances/runtime-benchmarks" + "pallet-balances/runtime-benchmarks", + "pallet-drand/runtime-benchmarks" ] try-runtime = [ 
"frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", - "sp-runtime/try-runtime" + "sp-runtime/try-runtime", + "pallet-drand/try-runtime" ] diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 0742271114..b33d07ca28 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -8,6 +8,7 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ + BuildStorage, testing::Header, traits::{BlakeTwo256, ConstU16, IdentityLookup}, }; @@ -86,12 +87,19 @@ impl TypeInfo for TestMaxFields { } } +pub struct TestCanCommit; +impl pallet_commitments::CanCommit for TestCanCommit { + fn can_commit(_netuid: u16, _who: &u64) -> bool { + true + } +} + impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type WeightInfo = (); type MaxFields = TestMaxFields; - type CanCommit = (); + type CanCommit = TestCanCommit; type FieldDeposit = ConstU64<0>; type InitialDeposit = ConstU64<0>; type DefaultRateLimit = ConstU64<0>; @@ -167,11 +175,11 @@ where type OverarchingCall = RuntimeCall; } -// pub fn new_test_ext() -> sp_io::TestExternalities { -// let t = frame_system::GenesisConfig::::default() -// .build_storage() -// .unwrap(); -// let mut ext = sp_io::TestExternalities::new(t); -// ext.execute_with(|| System::set_block_number(1)); -// ext -// } +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default() + .build_storage() + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 7fe76a7194..8a43394aa1 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -400,6 +400,12 @@ impl< #[allow(clippy::indexing_slicing, clippy::unwrap_used)] mod tests { use super::*; + use crate::{ + Config, Error, Event, Pallet, RateLimit, 
+ tests::{RuntimeEvent, RuntimeOrigin, Test, new_test_ext}, + }; + use frame_support::{BoundedVec, assert_noop, assert_ok}; + use frame_system::Pallet as System; #[test] fn manual_data_type_info() { @@ -481,7 +487,7 @@ mod tests { // Add a TimelockEncrypted instance data.push(Data::TimelockEncrypted { - encrypted: vec![0u8; 64].try_into().unwrap(), // Example encrypted data + encrypted: vec![0u8; 64].try_into().unwrap(), reveal_round: 12345, }); @@ -489,4 +495,157 @@ mod tests { check_type_info(d); } } + + #[test] + fn set_commitment_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + let commitment = Pallet::::commitment_of(1, &1).unwrap(); + let initial_deposit: u64 = ::InitialDeposit::get(); + assert_eq!(commitment.deposit, initial_deposit); + assert_eq!(commitment.block, 1); + assert_eq!(Pallet::::last_commitment(1, &1), Some(1)); + }); + } + + #[test] + #[should_panic(expected = "BoundedVec::try_from failed")] + fn set_commitment_too_many_fields_panics() { + new_test_ext().execute_with(|| { + let max_fields: u32 = ::MaxFields::get(); + let fields = vec![Data::None; (max_fields + 1) as usize]; + + // This line will panic when 'BoundedVec::try_from(...)' sees too many items. + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), + ..Default::default() + }); + + // We never get here, because the constructor panics above. 
+ let _ = + Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + }); + } + + #[test] + fn set_commitment_rate_limit_exceeded() { + new_test_ext().execute_with(|| { + let rate_limit = ::DefaultRateLimit::get(); + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + // Set block number to just before rate limit expires + System::::set_block_number(rate_limit); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), + Error::::CommitmentSetRateLimitExceeded + ); + + // Set block number to after rate limit + System::::set_block_number(rate_limit + 1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + }); + } + + #[test] + fn set_commitment_updates_deposit() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info1 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 2]).unwrap(), + ..Default::default() + }); + let info2 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 3]).unwrap(), + ..Default::default() + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info1 + )); + let initial_deposit: u64 = ::InitialDeposit::get(); + let field_deposit: u64 = ::FieldDeposit::get(); + let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, &1).unwrap().deposit, + expected_deposit1 + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info2 + )); + let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, &1).unwrap().deposit, + expected_deposit2 + ); + }); + } + + #[test] + fn set_rate_limit_works() { + new_test_ext().execute_with(|| { + let 
default_rate_limit: u64 = ::DefaultRateLimit::get(); + assert_eq!(RateLimit::::get(), default_rate_limit); + + assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); + assert_eq!(RateLimit::::get(), 200); + + assert_noop!( + Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), + sp_runtime::DispatchError::BadOrigin + ); + }); + } + + #[test] + fn event_emission_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + + let events = System::::events(); + assert!(events.iter().any(|e| matches!( + &e.event, + RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) + ))); + }); + } } diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index 34c7ee50d5..42b468b1d7 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -109,7 +109,8 @@ std = [ "rand_chacha/std", "safe-math/std", "sha2/std", - "share-pool/std" + "share-pool/std", + "pallet-commitments/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -122,7 +123,8 @@ runtime-benchmarks = [ "pallet-collective/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", - "pallet-drand/runtime-benchmarks" + "pallet-drand/runtime-benchmarks", + "pallet-commitments/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", @@ -135,7 +137,8 @@ try-runtime = [ "pallet-utility/try-runtime", "sp-runtime/try-runtime", "pallet-collective/try-runtime", - "pallet-drand/try-runtime" + "pallet-drand/try-runtime", + "pallet-commitments/try-runtime" ] pow-faucet = [] fast-blocks = [] diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 2f2eb5d8a4..bdaca2269b 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ 
b/pallets/subtensor/src/tests/mock.rs @@ -413,7 +413,6 @@ impl crate::Config for Test { type CommitmentRuntime = Test; } - parameter_types! { pub const MaxCommitFieldsInner: u32 = 1; pub const CommitmentInitialDeposit: Balance = 0; @@ -553,7 +552,7 @@ impl Get for MaxCommitFields { pub struct AllowCommitments; impl pallet_commitments::CanCommit for AllowCommitments { fn can_commit(_netuid: u16, _address: &AccountId) -> bool { - true + true } } From f95ca6401d0a51584a367c9939165f8351a5f44b Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:25:26 -0800 Subject: [PATCH 007/121] restructure commitments pallet --- pallets/commitments/src/lib.rs | 3 + pallets/commitments/src/mock.rs | 155 ++++++++++-- pallets/commitments/src/tests.rs | 399 ++++++++++++++++++------------- pallets/commitments/src/types.rs | 254 -------------------- 4 files changed, 378 insertions(+), 433 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 86738e5178..77cc06ddb7 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -4,6 +4,9 @@ mod benchmarking; #[cfg(test)] mod tests; +#[cfg(test)] +mod mock; + pub mod types; pub mod weights; diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 2d28a44c01..b77c10548c 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -1,23 +1,33 @@ use crate as pallet_commitments; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + pallet_prelude::{Get, TypeInfo}, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, BuildStorage, + testing::Header, + traits::{BlakeTwo256, ConstU16, IdentityLookup}, }; -type Block = frame_system::mocking::MockBlock; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; -// 
Configure a mock runtime to test the pallet. frame_support::construct_runtime!( pub enum Test { System: frame_system = 1, - Commitments: pallet_commitments = 2, - SubtensorModule: pallet_subtensor = 3, + Balances: pallet_balances = 2, + Commitments: pallet_commitments = 3, + Drand: pallet_drand = 4, } ); +pub type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -25,36 +35,149 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = ConstU16<42>; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; + type Block = Block; + type Nonce = u32; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct TestMaxFields; +impl Get for TestMaxFields { + fn get() -> u32 { + 16 + } +} +impl TypeInfo for TestMaxFields { + type Identity = Self; + fn type_info() -> scale_info::Type { + scale_info::Type::builder() + 
.path(scale_info::Path::new("TestMaxFields", module_path!())) + .composite(scale_info::build::Fields::unit()) + } +} + +pub struct TestCanCommit; +impl pallet_commitments::CanCommit for TestCanCommit { + fn can_commit(_netuid: u16, _who: &u64) -> bool { + true + } } impl pallet_commitments::Config for Test { type RuntimeEvent = RuntimeEvent; + type Currency = Balances; type WeightInfo = (); - type MaxAdditionalFields = frame_support::traits::ConstU32<16>; - type CanRegisterIdentity = (); + type MaxFields = TestMaxFields; + type CanCommit = TestCanCommit; + type FieldDeposit = ConstU64<0>; + type InitialDeposit = ConstU64<0>; + type DefaultRateLimit = ConstU64<0>; +} + +impl pallet_drand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_drand::weights::SubstrateWeight; + type AuthorityId = test_crypto::TestAuthId; + type Verifier = pallet_drand::verifier::QuicknetVerifier; + type UnsignedPriority = ConstU64<{ 1 << 20 }>; + type HttpFetchTimeout = ConstU64<1_000>; +} + +pub mod test_crypto { + use sp_core::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::IdentifyAccount, + }; + + pub const KEY_TYPE: sp_runtime::KeyTypeId = sp_runtime::KeyTypeId(*b"test"); + + app_crypto!(sr25519, KEY_TYPE); + + pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = Sr25519Signature; + type GenericPublic = Sr25519Public; + } + + impl IdentifyAccount for Public { + type AccountId = u64; + + fn into_account(self) -> u64 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(self.as_ref()); + u64::from_le_bytes(bytes[..8].try_into().unwrap()) + } + } +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = test_crypto::Public; + type Signature = test_crypto::Signature; +} + +impl frame_system::offchain::CreateSignedTransaction> for Test { + fn create_transaction>( + 
call: RuntimeCall, + _public: Self::Public, + account: Self::AccountId, + _nonce: u32, + ) -> Option<( + RuntimeCall, + ::SignaturePayload, + )> { + // Create a dummy sr25519 signature from a raw byte array + let dummy_raw = [0u8; 64]; + let dummy_signature = sp_core::sr25519::Signature::from(dummy_raw); + let signature = test_crypto::Signature::from(dummy_signature); + Some((call, (account, signature, ()))) + } +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; } -// Build genesis storage according to the mock runtime. pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::::default() + let t = frame_system::GenesisConfig::::default() .build_storage() - .unwrap() - .into() + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index b33d07ca28..c6b8d93d69 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1,185 +1,258 @@ -#![allow(non_camel_case_types)] - -use crate as pallet_commitments; -use frame_support::{ - derive_impl, - pallet_prelude::{Get, TypeInfo}, - traits::{ConstU32, ConstU64}, -}; -use sp_core::H256; -use sp_runtime::{ - BuildStorage, - testing::Header, - traits::{BlakeTwo256, ConstU16, IdentityLookup}, -}; - -pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system = 1, - Balances: pallet_balances = 2, - Commitments: pallet_commitments = 3, - Drand: pallet_drand = 4, - } -); - -pub type AccountId = u64; - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type 
BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - type Block = Block; - type Nonce = u32; -} +use crate::{CommitmentInfo, Data}; +use codec::Encode; +use frame_support::traits::Get; +use sp_std::prelude::*; -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); -} +#[cfg(test)] +#[allow(clippy::indexing_slicing, clippy::unwrap_used)] +mod tests { + use super::*; + use crate::{ + Config, Error, Event, Pallet, RateLimit, + mock::{RuntimeEvent, RuntimeOrigin, Test, new_test_ext}, + }; + use frame_support::{BoundedVec, assert_noop, assert_ok}; + use frame_system::Pallet as System; -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct TestMaxFields; -impl Get for TestMaxFields { - fn get() -> u32 { - 16 - } -} -impl TypeInfo for TestMaxFields { - type Identity = Self; - fn type_info() -> scale_info::Type { - scale_info::Type::builder() - .path(scale_info::Path::new("TestMaxFields", module_path!())) - .composite(scale_info::build::Fields::unit()) + #[test] + fn manual_data_type_info() { + let mut registry = scale_info::Registry::new(); + let type_id = 
registry.register_type(&scale_info::meta_type::()); + let registry: scale_info::PortableRegistry = registry.into(); + let type_info = registry.resolve(type_id.id).unwrap(); + + let check_type_info = |data: &Data| { + let variant_name = match data { + Data::None => "None".to_string(), + Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), + Data::Sha256(_) => "Sha256".to_string(), + Data::Keccak256(_) => "Keccak256".to_string(), + Data::ShaThree256(_) => "ShaThree256".to_string(), + Data::Raw(bytes) => format!("Raw{}", bytes.len()), + Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), + }; + if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { + let variant = variant + .variants + .iter() + .find(|v| v.name == variant_name) + .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); + + let encoded = data.encode(); + assert_eq!(encoded[0], variant.index); + + // For variants with fields, check the encoded length matches expected field lengths + if !variant.fields.is_empty() { + let expected_len = match data { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u32, + Data::BlakeTwo256(_) + | Data::Sha256(_) + | Data::Keccak256(_) + | Data::ShaThree256(_) => 32, + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + // Calculate length: encrypted (length prefixed) + reveal_round (u64) + let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix + let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes + encrypted_len + reveal_round_len + } + }; + assert_eq!( + encoded.len() as u32 - 1, // Subtract variant byte + expected_len, + "Encoded length mismatch for variant {}", + variant_name + ); + } else { + assert_eq!( + encoded.len() as u32 - 1, + 0, + "Expected no fields for {}", + variant_name + ); + } + } else { + panic!("Should be a variant type"); + } + }; + + let mut data = vec![ + Data::None, + Data::BlakeTwo256(Default::default()), + 
Data::Sha256(Default::default()), + Data::Keccak256(Default::default()), + Data::ShaThree256(Default::default()), + ]; + + // Add Raw instances for all possible sizes + for n in 0..128 { + data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())); + } + + // Add a TimelockEncrypted instance + data.push(Data::TimelockEncrypted { + encrypted: vec![0u8; 64].try_into().unwrap(), + reveal_round: 12345, + }); + + for d in data.iter() { + check_type_info(d); + } } -} -pub struct TestCanCommit; -impl pallet_commitments::CanCommit for TestCanCommit { - fn can_commit(_netuid: u16, _who: &u64) -> bool { - true + #[test] + fn set_commitment_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + let commitment = Pallet::::commitment_of(1, &1).unwrap(); + let initial_deposit: u64 = ::InitialDeposit::get(); + assert_eq!(commitment.deposit, initial_deposit); + assert_eq!(commitment.block, 1); + assert_eq!(Pallet::::last_commitment(1, &1), Some(1)); + }); } -} -impl pallet_commitments::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = (); - type MaxFields = TestMaxFields; - type CanCommit = TestCanCommit; - type FieldDeposit = ConstU64<0>; - type InitialDeposit = ConstU64<0>; - type DefaultRateLimit = ConstU64<0>; -} + #[test] + #[should_panic(expected = "BoundedVec::try_from failed")] + fn set_commitment_too_many_fields_panics() { + new_test_ext().execute_with(|| { + let max_fields: u32 = ::MaxFields::get(); + let fields = vec![Data::None; (max_fields + 1) as usize]; -impl pallet_drand::Config for Test { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_drand::weights::SubstrateWeight; - type AuthorityId = test_crypto::TestAuthId; - type Verifier = 
pallet_drand::verifier::QuicknetVerifier; - type UnsignedPriority = ConstU64<{ 1 << 20 }>; - type HttpFetchTimeout = ConstU64<1_000>; -} + // This line will panic when 'BoundedVec::try_from(...)' sees too many items. + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), + ..Default::default() + }); -pub mod test_crypto { - use sp_core::sr25519::{Public as Sr25519Public, Signature as Sr25519Signature}; - use sp_runtime::{ - app_crypto::{app_crypto, sr25519}, - traits::IdentifyAccount, - }; + // We never get here, because the constructor panics above. + let _ = + Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + }); + } - pub const KEY_TYPE: sp_runtime::KeyTypeId = sp_runtime::KeyTypeId(*b"test"); + #[test] + fn set_commitment_rate_limit_exceeded() { + new_test_ext().execute_with(|| { + let rate_limit = ::DefaultRateLimit::get(); + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); - app_crypto!(sr25519, KEY_TYPE); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); - pub struct TestAuthId; + // Set block number to just before rate limit expires + System::::set_block_number(rate_limit); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), + Error::::CommitmentSetRateLimitExceeded + ); - impl frame_system::offchain::AppCrypto for TestAuthId { - type RuntimeAppPublic = Public; - type GenericSignature = Sr25519Signature; - type GenericPublic = Sr25519Public; + // Set block number to after rate limit + System::::set_block_number(rate_limit + 1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + }); } - impl IdentifyAccount for Public { - type AccountId = u64; + #[test] + fn set_commitment_updates_deposit() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); 
+ let info1 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 2]).unwrap(), + ..Default::default() + }); + let info2 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 3]).unwrap(), + ..Default::default() + }); - fn into_account(self) -> u64 { - let mut bytes = [0u8; 32]; - bytes.copy_from_slice(self.as_ref()); - u64::from_le_bytes(bytes[..8].try_into().unwrap()) - } + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info1 + )); + let initial_deposit: u64 = ::InitialDeposit::get(); + let field_deposit: u64 = ::FieldDeposit::get(); + let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, &1).unwrap().deposit, + expected_deposit1 + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info2 + )); + let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, &1).unwrap().deposit, + expected_deposit2 + ); + }); } -} -impl frame_system::offchain::SigningTypes for Test { - type Public = test_crypto::Public; - type Signature = test_crypto::Signature; -} + #[test] + fn set_rate_limit_works() { + new_test_ext().execute_with(|| { + let default_rate_limit: u64 = ::DefaultRateLimit::get(); + assert_eq!(RateLimit::::get(), default_rate_limit); + + assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); + assert_eq!(RateLimit::::get(), 200); -impl frame_system::offchain::CreateSignedTransaction> for Test { - fn create_transaction>( - call: RuntimeCall, - _public: Self::Public, - account: Self::AccountId, - _nonce: u32, - ) -> Option<( - RuntimeCall, - ::SignaturePayload, - )> { - // Create a dummy sr25519 signature from a raw byte array - let dummy_raw = [0u8; 64]; - let dummy_signature = sp_core::sr25519::Signature::from(dummy_raw); - let signature = test_crypto::Signature::from(dummy_signature); - Some((call, (account, signature, ()))) + 
assert_noop!( + Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), + sp_runtime::DispatchError::BadOrigin + ); + }); } -} -impl frame_system::offchain::SendTransactionTypes for Test -where - RuntimeCall: From, -{ - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; -} + #[test] + fn event_emission_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).unwrap(), + ..Default::default() + }); -pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default() - .build_storage() - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + + let events = System::::events(); + assert!(events.iter().any(|e| matches!( + &e.event, + RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) + ))); + }); + } } diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 8a43394aa1..c59c7212e6 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -395,257 +395,3 @@ impl< }) } } - -#[cfg(test)] -#[allow(clippy::indexing_slicing, clippy::unwrap_used)] -mod tests { - use super::*; - use crate::{ - Config, Error, Event, Pallet, RateLimit, - tests::{RuntimeEvent, RuntimeOrigin, Test, new_test_ext}, - }; - use frame_support::{BoundedVec, assert_noop, assert_ok}; - use frame_system::Pallet as System; - - #[test] - fn manual_data_type_info() { - let mut registry = scale_info::Registry::new(); - let type_id = registry.register_type(&scale_info::meta_type::()); - let registry: scale_info::PortableRegistry = registry.into(); - let type_info = registry.resolve(type_id.id).unwrap(); - - let check_type_info = |data: &Data| { - let variant_name = match data { - Data::None => "None".to_string(), - 
Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), - Data::Sha256(_) => "Sha256".to_string(), - Data::Keccak256(_) => "Keccak256".to_string(), - Data::ShaThree256(_) => "ShaThree256".to_string(), - Data::Raw(bytes) => format!("Raw{}", bytes.len()), - Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), - }; - if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { - let variant = variant - .variants - .iter() - .find(|v| v.name == variant_name) - .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); - - let encoded = data.encode(); - assert_eq!(encoded[0], variant.index); - - // For variants with fields, check the encoded length matches expected field lengths - if !variant.fields.is_empty() { - let expected_len = match data { - Data::None => 0, - Data::Raw(bytes) => bytes.len() as u32, - Data::BlakeTwo256(_) - | Data::Sha256(_) - | Data::Keccak256(_) - | Data::ShaThree256(_) => 32, - Data::TimelockEncrypted { - encrypted, - reveal_round, - } => { - // Calculate length: encrypted (length prefixed) + reveal_round (u64) - let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix - let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes - encrypted_len + reveal_round_len - } - }; - assert_eq!( - encoded.len() as u32 - 1, // Subtract variant byte - expected_len, - "Encoded length mismatch for variant {}", - variant_name - ); - } else { - assert_eq!( - encoded.len() as u32 - 1, - 0, - "Expected no fields for {}", - variant_name - ); - } - } else { - panic!("Should be a variant type"); - } - }; - - let mut data = vec![ - Data::None, - Data::BlakeTwo256(Default::default()), - Data::Sha256(Default::default()), - Data::Keccak256(Default::default()), - Data::ShaThree256(Default::default()), - ]; - - // Add Raw instances for all possible sizes - for n in 0..128 { - data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())); - } - - // Add a TimelockEncrypted instance - 
data.push(Data::TimelockEncrypted { - encrypted: vec![0u8; 64].try_into().unwrap(), - reveal_round: 12345, - }); - - for d in data.iter() { - check_type_info(d); - } - } - - #[test] - fn set_commitment_works() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info.clone() - )); - - let commitment = Pallet::::commitment_of(1, &1).unwrap(); - let initial_deposit: u64 = ::InitialDeposit::get(); - assert_eq!(commitment.deposit, initial_deposit); - assert_eq!(commitment.block, 1); - assert_eq!(Pallet::::last_commitment(1, &1), Some(1)); - }); - } - - #[test] - #[should_panic(expected = "BoundedVec::try_from failed")] - fn set_commitment_too_many_fields_panics() { - new_test_ext().execute_with(|| { - let max_fields: u32 = ::MaxFields::get(); - let fields = vec![Data::None; (max_fields + 1) as usize]; - - // This line will panic when 'BoundedVec::try_from(...)' sees too many items. - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), - ..Default::default() - }); - - // We never get here, because the constructor panics above. 
- let _ = - Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); - }); - } - - #[test] - fn set_commitment_rate_limit_exceeded() { - new_test_ext().execute_with(|| { - let rate_limit = ::DefaultRateLimit::get(); - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info.clone() - )); - - // Set block number to just before rate limit expires - System::::set_block_number(rate_limit); - assert_noop!( - Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), - Error::::CommitmentSetRateLimitExceeded - ); - - // Set block number to after rate limit - System::::set_block_number(rate_limit + 1); - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info - )); - }); - } - - #[test] - fn set_commitment_updates_deposit() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info1 = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Default::default(); 2]).unwrap(), - ..Default::default() - }); - let info2 = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Default::default(); 3]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info1 - )); - let initial_deposit: u64 = ::InitialDeposit::get(); - let field_deposit: u64 = ::FieldDeposit::get(); - let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; - assert_eq!( - Pallet::::commitment_of(1, &1).unwrap().deposit, - expected_deposit1 - ); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info2 - )); - let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; - assert_eq!( - Pallet::::commitment_of(1, &1).unwrap().deposit, - expected_deposit2 - ); - }); - } - - #[test] - fn set_rate_limit_works() { - new_test_ext().execute_with(|| { - let 
default_rate_limit: u64 = ::DefaultRateLimit::get(); - assert_eq!(RateLimit::::get(), default_rate_limit); - - assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); - assert_eq!(RateLimit::::get(), 200); - - assert_noop!( - Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), - sp_runtime::DispatchError::BadOrigin - ); - }); - } - - #[test] - fn event_emission_works() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info - )); - - let events = System::::events(); - assert!(events.iter().any(|e| matches!( - &e.event, - RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) - ))); - }); - } -} From 423fe2c17c982d7f258d5027a4f6b1f7bf7ee718 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 3 Mar 2025 15:06:45 -0800 Subject: [PATCH 008/121] tlock commitments unit tests --- Cargo.lock | 3 + pallets/commitments/Cargo.toml | 4 + pallets/commitments/src/lib.rs | 179 +++++++++++------------- pallets/commitments/src/mock.rs | 76 +++++++++++ pallets/commitments/src/tests.rs | 224 ++++++++++++++++++++++++++++++- 5 files changed, 380 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf24920518..b65fb03087 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6093,11 +6093,14 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hex", "log", "pallet-balances", "pallet-drand", "parity-scale-codec", + "rand_chacha", "scale-info", + "sha2 0.10.8", "sp-core", "sp-io", "sp-runtime", diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index f8cb5e2cf7..c4507d44e9 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -33,6 +33,10 @@ pallet-drand = { path = "../drand", default-features = false } tle = { 
workspace = true, default-features = false } ark-serialize = { workspace = true, default-features = false } w3f-bls = { workspace = true, default-features = false } +rand_chacha = { workspace = true } +hex = { workspace = true } +sha2 = { workspace = true } + log = { workspace = true } [dev-dependencies] diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 77cc06ddb7..745e06b7c8 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -17,7 +17,6 @@ pub use weights::WeightInfo; use ark_serialize::CanonicalDeserialize; use frame_support::{BoundedVec, traits::Currency}; -use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{Saturating, traits::Zero}; use sp_std::{boxed::Box, vec::Vec}; use tle::{ @@ -398,123 +397,101 @@ impl Pallet { .iter() .find(|data| matches!(data, Data::TimelockEncrypted { .. })) { - // Calculate reveal block - let reveal_block = Self::calculate_reveal_block(*reveal_round, registration.block)?; - - // Check if the current block has reached or exceeded the reveal block - if current_block >= reveal_block { - // Deserialize the encrypted commitment into a TLECiphertext - let reader = &mut &encrypted[..]; - let commit = TLECiphertext::::deserialize_compressed(reader) + // Check if the corresponding Drand round data exists + let pulse = match pallet_drand::Pulses::::get(*reveal_round) { + Some(p) => p, + None => continue, + }; + + // Prepare the signature bytes + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + let sig_reader = &mut &signature_bytes[..]; + let sig = + ::SignatureGroup::deserialize_compressed(sig_reader) .map_err(|e| { - log::warn!("Failed to deserialize TLECiphertext for {:?}: {:?}", who, e) + log::warn!( + "Failed to deserialize drand signature for {:?}: {:?}", + who, + e + ) }) .ok(); - let commit = match commit { - Some(c) => c, - None => continue, - }; + let sig = match sig { + Some(s) => s, + None => continue, + 
}; - // Get the drand pulse for the reveal round - let pulse = match pallet_drand::Pulses::::get(*reveal_round) { - Some(p) => p, - None => { - log::warn!( - "Failed to reveal commit for subnet {} by {:?}: missing drand round {}", - netuid, - who, - reveal_round - ); - continue; - } - }; - - // Prepare the signature bytes - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - let sig_reader = &mut &signature_bytes[..]; - let sig = ::SignatureGroup::deserialize_compressed( - sig_reader, - ) + // Attempt to deserialize the encrypted commitment + let reader = &mut &encrypted[..]; + let commit = TLECiphertext::::deserialize_compressed(reader) .map_err(|e| { - log::warn!( - "Failed to deserialize drand signature for {:?}: {:?}", - who, - e - ) + log::warn!("Failed to deserialize TLECiphertext for {:?}: {:?}", who, e) }) .ok(); - let sig = match sig { - Some(s) => s, - None => continue, - }; - - // Decrypt the timelock commitment - let decrypted_bytes: Vec = - tld::(commit, sig) - .map_err(|e| { - log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) - }) - .ok() - .unwrap_or_default(); - - if decrypted_bytes.is_empty() { - continue; - } + let commit = match commit { + Some(c) => c, + None => continue, + }; - // Decode the decrypted bytes into CommitmentInfo (assuming it’s SCALE-encoded CommitmentInfo) - let mut reader = &decrypted_bytes[..]; - let revealed_info: CommitmentInfo = Decode::decode(&mut reader) + // Decrypt the timelock commitment + let decrypted_bytes: Vec = + tld::(commit, sig) .map_err(|e| { - log::warn!("Failed to decode decrypted data for {:?}: {:?}", who, e) + log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) }) .ok() - .unwrap_or_else(|| CommitmentInfo { - fields: BoundedVec::default(), - }); - - // Create RevealedData for storage - let revealed_data = RevealedData { - info: revealed_info, - revealed_block: current_block, - deposit: registration.deposit, - }; - - // Store the 
revealed data in RevealedCommitments - >::insert(netuid, &who, revealed_data); - - // Remove the TimelockEncrypted field from the original commitment - let filtered_fields: Vec = registration.info.fields.into_iter() - .filter(|data| !matches!(data, Data::TimelockEncrypted { reveal_round: r, .. } if r == reveal_round)) - .collect(); - registration.info.fields = BoundedVec::try_from(filtered_fields) - .map_err(|_| "Failed to filter timelock fields")?; - - Self::deposit_event(Event::CommitmentRevealed { netuid, who }); + .unwrap_or_default(); + + if decrypted_bytes.is_empty() { + continue; } + + // Decode the decrypted bytes into CommitmentInfo + let mut reader = &decrypted_bytes[..]; + let revealed_info: CommitmentInfo = match Decode::decode(&mut reader) + { + Ok(info) => info, + Err(e) => { + log::warn!("Failed to decode decrypted data for {:?}: {:?}", who, e); + continue; + } + }; + + // Store the revealed data + let revealed_data = RevealedData { + info: revealed_info, + revealed_block: current_block, + deposit: registration.deposit, + }; + >::insert(netuid, &who, revealed_data); + + // Remove the TimelockEncrypted field from the original commitment + let filtered_fields: Vec = registration + .info + .fields + .into_iter() + .filter(|data| { + !matches!( + data, + Data::TimelockEncrypted { + reveal_round: r, .. 
+ } if r == reveal_round + ) + }) + .collect(); + + registration.info.fields = BoundedVec::try_from(filtered_fields) + .map_err(|_| "Failed to filter timelock fields")?; + + Self::deposit_event(Event::CommitmentRevealed { netuid, who }); } } Ok(()) } - - fn calculate_reveal_block( - reveal_round: u64, - commit_block: BlockNumberFor, - ) -> Result, &'static str> { - let last_drand_round = pallet_drand::LastStoredRound::::get(); - let blocks_per_round = 12_u64.checked_div(3).unwrap_or(0); // 4 blocks per round (12s blocktime / 3s round) - let rounds_since_last = reveal_round.saturating_sub(last_drand_round); - let blocks_to_reveal = rounds_since_last.saturating_mul(blocks_per_round); - let reveal_block = commit_block.saturating_add( - blocks_to_reveal - .try_into() - .map_err(|_| "Block number conversion failed")?, - ); - Ok(reveal_block) - } } diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index b77c10548c..6c38868962 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -181,3 +181,79 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext.execute_with(|| System::set_block_number(1)); ext } + +use super::*; +use crate::{EngineBLS, MAX_TIMELOCK_COMMITMENT_SIZE_BYTES, TinyBLS381}; +use ark_serialize::CanonicalSerialize; +use frame_support::BoundedVec; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use tle::{ibe::fullident::Identity, stream_ciphers::AESGCMStreamCipherProvider, tlock::tle}; + +// Drand Quicknet public key and signature for round=1000: +pub const DRAND_QUICKNET_PUBKEY_HEX: &str = "83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6\ + a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809b\ + d274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a"; +pub const DRAND_QUICKNET_SIG_HEX: &str = "b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39"; + +/// Inserts a Drand pulse for 
`round` with the given `signature_bytes`. +pub fn insert_drand_pulse(round: u64, signature_bytes: &[u8]) { + let sig_bounded: BoundedVec> = signature_bytes + .to_vec() + .try_into() + .expect("Signature within 144 bytes"); + + let randomness_bounded: BoundedVec> = vec![0u8; 32] + .try_into() + .expect("Randomness must be exactly 32 bytes"); + + pallet_drand::Pulses::::insert( + round, + pallet_drand::types::Pulse { + round, + randomness: randomness_bounded, + signature: sig_bounded, + }, + ); +} + +/// Produces a **real** ciphertext by TLE-encrypting `plaintext` for Drand Quicknet `round`. +/// +/// The returned `BoundedVec>` +/// will decrypt if you pass in the valid signature for the same round. +pub fn produce_ciphertext( + plaintext: &[u8], + round: u64, +) -> BoundedVec> { + // 1) Deserialize the known Drand Quicknet public key: + let pub_key_bytes = hex::decode(DRAND_QUICKNET_PUBKEY_HEX).expect("decode pubkey"); + let pub_key = + ::PublicKeyGroup::deserialize_compressed(&pub_key_bytes[..]) + .expect("bad pubkey bytes"); + + // 2) Prepare the identity for that round + // by hashing round.to_be_bytes() with SHA256: + let msg = { + let mut hasher = sha2::Sha256::new(); + hasher.update(round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![msg]); + + // 3) Actually encrypt + // (just an example ephemeral secret key & RNG seed) + let esk = [2u8; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + + let ct = tle::( + pub_key, esk, plaintext, identity, rng, + ) + .expect("Encryption failed in produce_real_ciphertext"); + + // 4) Serialize the ciphertext to BoundedVec + let mut ct_bytes = Vec::new(); + ct.serialize_compressed(&mut ct_bytes) + .expect("serialize TLECiphertext"); + + ct_bytes.try_into().expect("Ciphertext is within max size") +} diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index c6b8d93d69..fd13683241 100644 --- a/pallets/commitments/src/tests.rs +++ 
b/pallets/commitments/src/tests.rs @@ -1,6 +1,4 @@ -use crate::{CommitmentInfo, Data}; use codec::Encode; -use frame_support::traits::Get; use sp_std::prelude::*; #[cfg(test)] @@ -8,10 +6,13 @@ use sp_std::prelude::*; mod tests { use super::*; use crate::{ - Config, Error, Event, Pallet, RateLimit, - mock::{RuntimeEvent, RuntimeOrigin, Test, new_test_ext}, + CommitmentInfo, Config, Data, Error, Event, Pallet, RateLimit, RevealedCommitments, + mock::{ + DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, + new_test_ext, produce_ciphertext, + }, }; - use frame_support::{BoundedVec, assert_noop, assert_ok}; + use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; use frame_system::Pallet as System; #[test] @@ -255,4 +256,217 @@ mod tests { ))); }); } + + #[test] + fn happy_path_timelock_commitments() { + new_test_ext().execute_with(|| { + let message_text = b"Hello timelock only!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes for Raw variant"), + ); + let fields_vec = vec![data_raw]; + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(fields_vec).expect("Too many fields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let data = Data::TimelockEncrypted { + encrypted: encrypted.clone(), + reveal_round, + }; + + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Too many fields"); + let info_outer = CommitmentInfo { + fields: fields_outer, + }; + + let who = 123; + let netuid = 42; + System::::set_block_number(1); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); + insert_drand_pulse(reveal_round, 
&drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + + let revealed = + RevealedCommitments::::get(netuid, &who).expect("Should have revealed data"); + + let revealed_inner = &revealed.info; + assert_eq!(revealed_inner.fields.len(), 1); + match &revealed_inner.fields[0] { + Data::Raw(bounded_bytes) => { + assert_eq!( + bounded_bytes.as_slice(), + message_text, + "Decrypted text from on-chain storage must match the original message" + ); + } + other => panic!("Expected Data::Raw(...) in revealed, got {:?}", other), + } + }); + } + + #[test] + fn reveal_timelocked_commitment_missing_round_does_nothing() { + new_test_ext().execute_with(|| { + let who = 1; + let netuid = 2; + System::::set_block_number(5); + let ciphertext = produce_ciphertext(b"My plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).unwrap(); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + System::::set_block_number(100_000); + assert_ok!(Pallet::::reveal_timelocked_commitments(100_000)); + assert!(RevealedCommitments::::get(netuid, &who).is_none()); + }); + } + + #[test] + fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 9; + System::::set_block_number(10); + let good_ct = produce_ciphertext(b"Some data", 1000); + let mut corrupted = good_ct.into_inner(); + if !corrupted.is_empty() { + corrupted[0] = 0xFF; + } + let corrupted_ct = BoundedVec::try_from(corrupted).unwrap(); + let data = Data::TimelockEncrypted { + encrypted: corrupted_ct, + reveal_round: 1000, + }; + let fields = BoundedVec::try_from(vec![data]).unwrap(); + let info = CommitmentInfo { fields }; + let origin = 
RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); + insert_drand_pulse(1000, &sig_bytes); + System::::set_block_number(99999); + assert_ok!(Pallet::::reveal_timelocked_commitments(99999)); + assert!(RevealedCommitments::::get(netuid, &who).is_none()); + }); + } + + #[test] + fn reveal_timelocked_commitment_bad_signature_skips_decryption() { + new_test_ext().execute_with(|| { + let who = 10; + let netuid = 11; + System::::set_block_number(15); + let real_ct = produce_ciphertext(b"A valid plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).unwrap(); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let bad_signature = [0x33u8; 10]; + insert_drand_pulse(1000, &bad_signature); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert!(RevealedCommitments::::get(netuid, &who).is_none()); + }); + } + + #[test] + fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { + new_test_ext().execute_with(|| { + let who = 2; + let netuid = 3; + let commit_block = 100u64; + System::::set_block_number(commit_block); + let reveal_round = 1000; + let empty_ct = produce_ciphertext(&[], reveal_round); + let data = Data::TimelockEncrypted { + encrypted: empty_ct, + reveal_round, + }; + let fields = BoundedVec::try_from(vec![data]).unwrap(); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(10_000); + 
assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert!(RevealedCommitments::::get(netuid, &who).is_none()); + }); + } + + #[test] + fn reveal_timelocked_commitment_decode_failure_is_skipped() { + new_test_ext().execute_with(|| { + let who = 999; + let netuid = 8; + let commit_block = 42u64; + System::::set_block_number(commit_block); + let plaintext = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE]; + let reveal_round = 1000; + let real_ct = produce_ciphertext(&plaintext, reveal_round); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round, + }; + let fields = BoundedVec::try_from(vec![data]).unwrap(); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX.as_bytes()).unwrap(); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + assert!(RevealedCommitments::::get(netuid, &who).is_none()); + }); + } } From 3c8d3de212a160b9a24c2f83028b7216d1fefb2d Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 3 Mar 2025 15:18:35 -0800 Subject: [PATCH 009/121] zepter --- pallets/commitments/Cargo.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index c4507d44e9..fb0091debf 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -62,7 +62,10 @@ std = [ "log/std", "pallet-drand/std", "tle/std", - "w3f-bls/std" + "w3f-bls/std", + "hex/std", + "rand_chacha/std", + "sha2/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", From 4a2b2986e93f0b9b9b39b6f61fc618e5d61a00e5 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 3 Mar 2025 15:42:08 -0800 Subject: [PATCH 010/121] address lints 
--- pallets/admin-utils/src/tests/mock.rs | 1 + pallets/commitments/src/mock.rs | 4 +- pallets/commitments/src/tests.rs | 886 +++++++++++++------------- pallets/commitments/src/types.rs | 1 + pallets/subtensor/src/tests/mock.rs | 1 + runtime/src/lib.rs | 2 + 6 files changed, 452 insertions(+), 443 deletions(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index aa7d8cd1da..47565d9990 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -386,6 +386,7 @@ parameter_types! { pub const CommitmentRateLimit: BlockNumber = 100; } +#[subtensor_macros::freeze_struct("7c76bd954afbb54e")] #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] pub struct MaxCommitFields; impl Get for MaxCommitFields { diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 6c38868962..5a4b3cace5 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -137,7 +137,7 @@ pub mod test_crypto { fn into_account(self) -> u64 { let mut bytes = [0u8; 32]; bytes.copy_from_slice(self.as_ref()); - u64::from_le_bytes(bytes[..8].try_into().unwrap()) + u64::from_le_bytes(bytes[..8].try_into().expect("Expected to not panic")) } } } @@ -176,7 +176,7 @@ where pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default() .build_storage() - .unwrap(); + .expect("Expected to not panic"); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index fd13683241..c40c0b0427 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -2,471 +2,475 @@ use codec::Encode; use sp_std::prelude::*; #[cfg(test)] -#[allow(clippy::indexing_slicing, clippy::unwrap_used)] -mod tests { - use super::*; - use crate::{ - CommitmentInfo, Config, Data, Error, Event, Pallet, RateLimit, 
RevealedCommitments, - mock::{ - DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, - new_test_ext, produce_ciphertext, - }, - }; - use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; - use frame_system::Pallet as System; - - #[test] - fn manual_data_type_info() { - let mut registry = scale_info::Registry::new(); - let type_id = registry.register_type(&scale_info::meta_type::()); - let registry: scale_info::PortableRegistry = registry.into(); - let type_info = registry.resolve(type_id.id).unwrap(); - - let check_type_info = |data: &Data| { - let variant_name = match data { - Data::None => "None".to_string(), - Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), - Data::Sha256(_) => "Sha256".to_string(), - Data::Keccak256(_) => "Keccak256".to_string(), - Data::ShaThree256(_) => "ShaThree256".to_string(), - Data::Raw(bytes) => format!("Raw{}", bytes.len()), - Data::TimelockEncrypted { .. } => "TimelockEncrypted".to_string(), - }; - if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { - let variant = variant - .variants - .iter() - .find(|v| v.name == variant_name) - .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); - - let encoded = data.encode(); - assert_eq!(encoded[0], variant.index); - - // For variants with fields, check the encoded length matches expected field lengths - if !variant.fields.is_empty() { - let expected_len = match data { - Data::None => 0, - Data::Raw(bytes) => bytes.len() as u32, - Data::BlakeTwo256(_) - | Data::Sha256(_) - | Data::Keccak256(_) - | Data::ShaThree256(_) => 32, - Data::TimelockEncrypted { - encrypted, - reveal_round, - } => { - // Calculate length: encrypted (length prefixed) + reveal_round (u64) - let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix - let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes - encrypted_len + reveal_round_len - } - }; - assert_eq!( - encoded.len() as u32 - 1, // 
Subtract variant byte - expected_len, - "Encoded length mismatch for variant {}", - variant_name - ); - } else { - assert_eq!( - encoded.len() as u32 - 1, - 0, - "Expected no fields for {}", - variant_name - ); - } +use crate::{ + CommitmentInfo, Config, Data, Error, Event, Pallet, RateLimit, RevealedCommitments, + mock::{ + DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, + new_test_ext, produce_ciphertext, + }, +}; +use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; +use frame_system::Pallet as System; + +#[allow(clippy::indexing_slicing)] +#[test] +fn manual_data_type_info() { + let mut registry = scale_info::Registry::new(); + let type_id = registry.register_type(&scale_info::meta_type::()); + let registry: scale_info::PortableRegistry = registry.into(); + let type_info = registry.resolve(type_id.id).expect("Expected not to panic"); + + let check_type_info = |data: &Data| { + let variant_name = match data { + Data::None => "None".to_string(), + Data::BlakeTwo256(_) => "BlakeTwo256".to_string(), + Data::Sha256(_) => "Sha256".to_string(), + Data::Keccak256(_) => "Keccak256".to_string(), + Data::ShaThree256(_) => "ShaThree256".to_string(), + Data::Raw(bytes) => format!("Raw{}", bytes.len()), + Data::TimelockEncrypted { .. 
} => "TimelockEncrypted".to_string(), + }; + if let scale_info::TypeDef::Variant(variant) = &type_info.type_def { + let variant = variant + .variants + .iter() + .find(|v| v.name == variant_name) + .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name)); + + let encoded = data.encode(); + assert_eq!(encoded[0], variant.index); + + // For variants with fields, check the encoded length matches expected field lengths + if !variant.fields.is_empty() { + let expected_len = match data { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u32, + Data::BlakeTwo256(_) + | Data::Sha256(_) + | Data::Keccak256(_) + | Data::ShaThree256(_) => 32, + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + // Calculate length: encrypted (length prefixed) + reveal_round (u64) + let encrypted_len = encrypted.encode().len() as u32; // Includes length prefix + let reveal_round_len = reveal_round.encode().len() as u32; // Typically 8 bytes + encrypted_len + reveal_round_len + } + }; + assert_eq!( + encoded.len() as u32 - 1, // Subtract variant byte + expected_len, + "Encoded length mismatch for variant {}", + variant_name + ); } else { - panic!("Should be a variant type"); + assert_eq!( + encoded.len() as u32 - 1, + 0, + "Expected no fields for {}", + variant_name + ); } - }; - - let mut data = vec![ - Data::None, - Data::BlakeTwo256(Default::default()), - Data::Sha256(Default::default()), - Data::Keccak256(Default::default()), - Data::ShaThree256(Default::default()), - ]; - - // Add Raw instances for all possible sizes - for n in 0..128 { - data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap())); + } else { + panic!("Should be a variant type"); } + }; - // Add a TimelockEncrypted instance - data.push(Data::TimelockEncrypted { - encrypted: vec![0u8; 64].try_into().unwrap(), - reveal_round: 12345, - }); + let mut data = vec![ + Data::None, + Data::BlakeTwo256(Default::default()), + Data::Sha256(Default::default()), + 
Data::Keccak256(Default::default()), + Data::ShaThree256(Default::default()), + ]; + + // Add Raw instances for all possible sizes + for n in 0..128 { + data.push(Data::Raw( + vec![0u8; n as usize] + .try_into() + .expect("Expected not to panic"), + )); + } - for d in data.iter() { - check_type_info(d); - } + // Add a TimelockEncrypted instance + data.push(Data::TimelockEncrypted { + encrypted: vec![0u8; 64].try_into().expect("Expected not to panic"), + reveal_round: 12345, + }); + + for d in data.iter() { + check_type_info(d); } +} - #[test] - fn set_commitment_works() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info.clone() - )); - - let commitment = Pallet::::commitment_of(1, &1).unwrap(); - let initial_deposit: u64 = ::InitialDeposit::get(); - assert_eq!(commitment.deposit, initial_deposit); - assert_eq!(commitment.block, 1); - assert_eq!(Pallet::::last_commitment(1, &1), Some(1)); +#[test] +fn set_commitment_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), }); - } - #[test] - #[should_panic(expected = "BoundedVec::try_from failed")] - fn set_commitment_too_many_fields_panics() { - new_test_ext().execute_with(|| { - let max_fields: u32 = ::MaxFields::get(); - let fields = vec![Data::None; (max_fields + 1) as usize]; - - // This line will panic when 'BoundedVec::try_from(...)' sees too many items. - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), - ..Default::default() - }); - - // We never get here, because the constructor panics above. 
- let _ = - Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + let commitment = Pallet::::commitment_of(1, 1).expect("Expected not to panic"); + let initial_deposit: u64 = ::InitialDeposit::get(); + assert_eq!(commitment.deposit, initial_deposit); + assert_eq!(commitment.block, 1); + assert_eq!(Pallet::::last_commitment(1, 1), Some(1)); + }); +} + +#[test] +#[should_panic(expected = "BoundedVec::try_from failed")] +fn set_commitment_too_many_fields_panics() { + new_test_ext().execute_with(|| { + let max_fields: u32 = ::MaxFields::get(); + let fields = vec![Data::None; (max_fields + 1) as usize]; + + // This line will panic when 'BoundedVec::try_from(...)' sees too many items. + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(fields).expect("BoundedVec::try_from failed"), }); - } - #[test] - fn set_commitment_rate_limit_exceeded() { - new_test_ext().execute_with(|| { - let rate_limit = ::DefaultRateLimit::get(); - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info.clone() - )); - - // Set block number to just before rate limit expires - System::::set_block_number(rate_limit); - assert_noop!( - Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), - Error::::CommitmentSetRateLimitExceeded - ); - - // Set block number to after rate limit - System::::set_block_number(rate_limit + 1); - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info - )); + // We never get here, because the constructor panics above. 
+ let _ = Pallet::::set_commitment(frame_system::RawOrigin::Signed(1).into(), 1, info); + }); +} + +#[test] +fn set_commitment_rate_limit_exceeded() { + new_test_ext().execute_with(|| { + let rate_limit = ::DefaultRateLimit::get(); + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), }); - } - #[test] - fn set_commitment_updates_deposit() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info1 = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Default::default(); 2]).unwrap(), - ..Default::default() - }); - let info2 = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Default::default(); 3]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info1 - )); - let initial_deposit: u64 = ::InitialDeposit::get(); - let field_deposit: u64 = ::FieldDeposit::get(); - let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; - assert_eq!( - Pallet::::commitment_of(1, &1).unwrap().deposit, - expected_deposit1 - ); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info2 - )); - let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; - assert_eq!( - Pallet::::commitment_of(1, &1).unwrap().deposit, - expected_deposit2 - ); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info.clone() + )); + + // Set block number to just before rate limit expires + System::::set_block_number(rate_limit); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), + Error::::CommitmentSetRateLimitExceeded + ); + + // Set block number to after rate limit + System::::set_block_number(rate_limit + 1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + }); +} + +#[test] +fn set_commitment_updates_deposit() { + new_test_ext().execute_with(|| { + 
System::::set_block_number(1); + let info1 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 2]) + .expect("Expected not to panic"), + }); + let info2 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Default::default(); 3]) + .expect("Expected not to panic"), }); - } - #[test] - fn set_rate_limit_works() { - new_test_ext().execute_with(|| { - let default_rate_limit: u64 = ::DefaultRateLimit::get(); - assert_eq!(RateLimit::::get(), default_rate_limit); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info1 + )); + let initial_deposit: u64 = ::InitialDeposit::get(); + let field_deposit: u64 = ::FieldDeposit::get(); + let expected_deposit1: u64 = initial_deposit + 2u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + .deposit, + expected_deposit1 + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info2 + )); + let expected_deposit2: u64 = initial_deposit + 3u64 * field_deposit; + assert_eq!( + Pallet::::commitment_of(1, 1) + .expect("Expected not to panic") + .deposit, + expected_deposit2 + ); + }); +} - assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); - assert_eq!(RateLimit::::get(), 200); +#[test] +fn set_rate_limit_works() { + new_test_ext().execute_with(|| { + let default_rate_limit: u64 = ::DefaultRateLimit::get(); + assert_eq!(RateLimit::::get(), default_rate_limit); - assert_noop!( - Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), - sp_runtime::DispatchError::BadOrigin - ); - }); - } + assert_ok!(Pallet::::set_rate_limit(RuntimeOrigin::root(), 200)); + assert_eq!(RateLimit::::get(), 200); - #[test] - fn event_emission_works() { - new_test_ext().execute_with(|| { - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).unwrap(), - ..Default::default() - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - 
info - )); - - let events = System::::events(); - assert!(events.iter().any(|e| matches!( - &e.event, - RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) - ))); - }); - } + assert_noop!( + Pallet::::set_rate_limit(RuntimeOrigin::signed(1), 300), + sp_runtime::DispatchError::BadOrigin + ); + }); +} - #[test] - fn happy_path_timelock_commitments() { - new_test_ext().execute_with(|| { - let message_text = b"Hello timelock only!"; - let data_raw = Data::Raw( - message_text - .to_vec() - .try_into() - .expect("<= 128 bytes for Raw variant"), - ); - let fields_vec = vec![data_raw]; - let fields_bounded: BoundedVec::MaxFields> = - BoundedVec::try_from(fields_vec).expect("Too many fields"); - - let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { - fields: fields_bounded, - }; - - let plaintext = inner_info.encode(); - - let reveal_round = 1000; - let encrypted = produce_ciphertext(&plaintext, reveal_round); - - let data = Data::TimelockEncrypted { - encrypted: encrypted.clone(), - reveal_round, - }; - - let fields_outer: BoundedVec::MaxFields> = - BoundedVec::try_from(vec![data]).expect("Too many fields"); - let info_outer = CommitmentInfo { - fields: fields_outer, - }; - - let who = 123; - let netuid = 42; - System::::set_block_number(1); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(who), - netuid, - Box::new(info_outer) - )); - - let drand_signature_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); - insert_drand_pulse(reveal_round, &drand_signature_bytes); - - System::::set_block_number(9999); - assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); - - let revealed = - RevealedCommitments::::get(netuid, &who).expect("Should have revealed data"); - - let revealed_inner = &revealed.info; - assert_eq!(revealed_inner.fields.len(), 1); - match &revealed_inner.fields[0] { - Data::Raw(bounded_bytes) => { - assert_eq!( - bounded_bytes.as_slice(), - message_text, - "Decrypted text from on-chain storage must match the 
original message" - ); - } - other => panic!("Expected Data::Raw(...) in revealed, got {:?}", other), - } +#[test] +fn event_emission_works() { + new_test_ext().execute_with(|| { + System::::set_block_number(1); + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), }); - } - #[test] - fn reveal_timelocked_commitment_missing_round_does_nothing() { - new_test_ext().execute_with(|| { - let who = 1; - let netuid = 2; - System::::set_block_number(5); - let ciphertext = produce_ciphertext(b"My plaintext", 1000); - let data = Data::TimelockEncrypted { - encrypted: ciphertext, - reveal_round: 1000, - }; - let fields: BoundedVec<_, ::MaxFields> = - BoundedVec::try_from(vec![data]).unwrap(); - let info = CommitmentInfo { fields }; - let origin = RuntimeOrigin::signed(who); - assert_ok!(Pallet::::set_commitment( - origin, - netuid, - Box::new(info) - )); - System::::set_block_number(100_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(100_000)); - assert!(RevealedCommitments::::get(netuid, &who).is_none()); - }); - } + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(1), + 1, + info + )); + + let events = System::::events(); + assert!(events.iter().any(|e| matches!( + &e.event, + RuntimeEvent::Commitments(Event::Commitment { netuid: 1, who: 1 }) + ))); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn happy_path_timelock_commitments() { + new_test_ext().execute_with(|| { + let message_text = b"Hello timelock only!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes for Raw variant"), + ); + let fields_vec = vec![data_raw]; + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(fields_vec).expect("Too many fields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, 
reveal_round); + + let data = Data::TimelockEncrypted { + encrypted: encrypted.clone(), + reveal_round, + }; + + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Too many fields"); + let info_outer = CommitmentInfo { + fields: fields_outer, + }; - #[test] - fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { - new_test_ext().execute_with(|| { - let who = 42; - let netuid = 9; - System::::set_block_number(10); - let good_ct = produce_ciphertext(b"Some data", 1000); - let mut corrupted = good_ct.into_inner(); - if !corrupted.is_empty() { - corrupted[0] = 0xFF; + let who = 123; + let netuid = 42; + System::::set_block_number(1); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = + hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Should have revealed data"); + + let revealed_inner = &revealed.info; + assert_eq!(revealed_inner.fields.len(), 1); + match &revealed_inner.fields[0] { + Data::Raw(bounded_bytes) => { + assert_eq!( + bounded_bytes.as_slice(), + message_text, + "Decrypted text from on-chain storage must match the original message" + ); } - let corrupted_ct = BoundedVec::try_from(corrupted).unwrap(); - let data = Data::TimelockEncrypted { - encrypted: corrupted_ct, - reveal_round: 1000, - }; - let fields = BoundedVec::try_from(vec![data]).unwrap(); - let info = CommitmentInfo { fields }; - let origin = RuntimeOrigin::signed(who); - assert_ok!(Pallet::::set_commitment( - origin, - netuid, - Box::new(info) - )); - let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); - insert_drand_pulse(1000, &sig_bytes); - System::::set_block_number(99999); - 
assert_ok!(Pallet::::reveal_timelocked_commitments(99999)); - assert!(RevealedCommitments::::get(netuid, &who).is_none()); - }); - } + other => panic!("Expected Data::Raw(...) in revealed, got {:?}", other), + } + }); +} - #[test] - fn reveal_timelocked_commitment_bad_signature_skips_decryption() { - new_test_ext().execute_with(|| { - let who = 10; - let netuid = 11; - System::::set_block_number(15); - let real_ct = produce_ciphertext(b"A valid plaintext", 1000); - let data = Data::TimelockEncrypted { - encrypted: real_ct, - reveal_round: 1000, - }; - let fields: BoundedVec<_, ::MaxFields> = - BoundedVec::try_from(vec![data]).unwrap(); - let info = CommitmentInfo { fields }; - let origin = RuntimeOrigin::signed(who); - assert_ok!(Pallet::::set_commitment( - origin, - netuid, - Box::new(info) - )); - let bad_signature = [0x33u8; 10]; - insert_drand_pulse(1000, &bad_signature); - System::::set_block_number(10_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); - assert!(RevealedCommitments::::get(netuid, &who).is_none()); - }); - } +#[test] +fn reveal_timelocked_commitment_missing_round_does_nothing() { + new_test_ext().execute_with(|| { + let who = 1; + let netuid = 2; + System::::set_block_number(5); + let ciphertext = produce_ciphertext(b"My plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: ciphertext, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + System::::set_block_number(100_000); + assert_ok!(Pallet::::reveal_timelocked_commitments(100_000)); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} - #[test] - fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { - new_test_ext().execute_with(|| { - let who = 2; - let netuid = 3; - let 
commit_block = 100u64; - System::::set_block_number(commit_block); - let reveal_round = 1000; - let empty_ct = produce_ciphertext(&[], reveal_round); - let data = Data::TimelockEncrypted { - encrypted: empty_ct, - reveal_round, - }; - let fields = BoundedVec::try_from(vec![data]).unwrap(); - let info = CommitmentInfo { fields }; - let origin = RuntimeOrigin::signed(who); - assert_ok!(Pallet::::set_commitment( - origin, - netuid, - Box::new(info) - )); - let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).unwrap(); - insert_drand_pulse(reveal_round, &sig_bytes); - System::::set_block_number(10_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); - assert!(RevealedCommitments::::get(netuid, &who).is_none()); - }); - } +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 9; + System::::set_block_number(10); + let good_ct = produce_ciphertext(b"Some data", 1000); + let mut corrupted = good_ct.into_inner(); + if !corrupted.is_empty() { + corrupted[0] = 0xFF; + } + let corrupted_ct = BoundedVec::try_from(corrupted).expect("Expected not to panic"); + let data = Data::TimelockEncrypted { + encrypted: corrupted_ct, + reveal_round: 1000, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(1000, &sig_bytes); + System::::set_block_number(99999); + assert_ok!(Pallet::::reveal_timelocked_commitments(99999)); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} - #[test] - fn reveal_timelocked_commitment_decode_failure_is_skipped() { - new_test_ext().execute_with(|| { - let who = 999; - let netuid = 8; - let commit_block = 
42u64; - System::::set_block_number(commit_block); - let plaintext = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE]; - let reveal_round = 1000; - let real_ct = produce_ciphertext(&plaintext, reveal_round); - let data = Data::TimelockEncrypted { - encrypted: real_ct, - reveal_round, - }; - let fields = BoundedVec::try_from(vec![data]).unwrap(); - let info = CommitmentInfo { fields }; - let origin = RuntimeOrigin::signed(who); - assert_ok!(Pallet::::set_commitment( - origin, - netuid, - Box::new(info) - )); - let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX.as_bytes()).unwrap(); - insert_drand_pulse(reveal_round, &sig_bytes); - System::::set_block_number(9999); - assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); - assert!(RevealedCommitments::::get(netuid, &who).is_none()); - }); - } +#[test] +fn reveal_timelocked_commitment_bad_signature_skips_decryption() { + new_test_ext().execute_with(|| { + let who = 10; + let netuid = 11; + System::::set_block_number(15); + let real_ct = produce_ciphertext(b"A valid plaintext", 1000); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round: 1000, + }; + let fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let bad_signature = [0x33u8; 10]; + insert_drand_pulse(1000, &bad_signature); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { + new_test_ext().execute_with(|| { + let who = 2; + let netuid = 3; + let commit_block = 100u64; + System::::set_block_number(commit_block); + let reveal_round = 1000; + let empty_ct = produce_ciphertext(&[], reveal_round); + let data = Data::TimelockEncrypted { + 
encrypted: empty_ct, + reveal_round, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(10_000); + assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); +} + +#[test] +fn reveal_timelocked_commitment_decode_failure_is_skipped() { + new_test_ext().execute_with(|| { + let who = 999; + let netuid = 8; + let commit_block = 42u64; + System::::set_block_number(commit_block); + let plaintext = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE]; + let reveal_round = 1000; + let real_ct = produce_ciphertext(&plaintext, reveal_round); + let data = Data::TimelockEncrypted { + encrypted: real_ct, + reveal_round, + }; + let fields = BoundedVec::try_from(vec![data]).expect("Expected not to panic"); + let info = CommitmentInfo { fields }; + let origin = RuntimeOrigin::signed(who); + assert_ok!(Pallet::::set_commitment( + origin, + netuid, + Box::new(info) + )); + let sig_bytes = + hex::decode(DRAND_QUICKNET_SIG_HEX.as_bytes()).expect("Expected not to panic"); + insert_drand_pulse(reveal_round, &sig_bytes); + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + assert!(RevealedCommitments::::get(netuid, who).is_none()); + }); } diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index c59c7212e6..400c102443 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -338,6 +338,7 @@ pub struct CommitmentInfo> { pub const MAX_TIMELOCK_COMMITMENT_SIZE_BYTES: u32 = 1024; /// Contains the decrypted data of a revealed commitment. 
+#[freeze_struct("bf575857b57f9bef")] #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] pub struct RevealedData, BlockNumber> { pub info: CommitmentInfo, diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index bdaca2269b..4e767b7c81 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -541,6 +541,7 @@ impl frame_system::offchain::CreateSignedTransaction> f } } +#[freeze_struct("7c76bd954afbb54e")] #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] pub struct MaxCommitFields; impl Get for MaxCommitFields { diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 8c2fbd43bf..09a551ce39 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -996,6 +996,7 @@ parameter_types! { pub const CommitmentRateLimit: BlockNumber = 100; // Allow commitment every 100 blocks } +#[subtensor_macros::freeze_struct("7c76bd954afbb54e")] #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] pub struct MaxCommitFields; impl Get for MaxCommitFields { @@ -1004,6 +1005,7 @@ impl Get for MaxCommitFields { } } +#[subtensor_macros::freeze_struct("c39297f5eb97ee82")] pub struct AllowCommitments; impl CanCommit for AllowCommitments { #[cfg(not(feature = "runtime-benchmarks"))] From 087288981e79800a3a0d7605c7872293eb85bdc7 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 4 Mar 2025 08:38:27 -0800 Subject: [PATCH 011/121] add import --- runtime/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fa0b930ada..2953299edf 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -89,6 +89,8 @@ pub use sp_runtime::{Perbill, Permill}; use core::marker::PhantomData; +use scale_info::TypeInfo; + // Frontier use fp_rpc::TransactionStatus; use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; From 7e76fb1ed226d95aecd1f4dc90b1365ab3b560b0 Mon Sep 17 00:00:00 
2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 4 Mar 2025 12:32:05 -0800 Subject: [PATCH 012/121] handle multiple fields --- pallets/commitments/src/lib.rs | 219 ++++++++++++++++++------------- pallets/commitments/src/tests.rs | 194 +++++++++++++++++++++++++++ 2 files changed, 323 insertions(+), 90 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 745e06b7c8..e03fce646c 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -25,6 +25,7 @@ use tle::{ tlock::{TLECiphertext, tld}, }; use w3f_bls::EngineBLS; +use scale_info::prelude::collections::BTreeSet; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -386,109 +387,147 @@ impl Pallet { .map_err(|_| "Failed to convert u64 to BlockNumberFor")?; for (netuid, who, mut registration) in >::iter() { - if let Some(Data::TimelockEncrypted { - encrypted, - reveal_round, - .. - }) = registration - .info - .fields - .clone() - .iter() - .find(|data| matches!(data, Data::TimelockEncrypted { .. 
})) - { - // Check if the corresponding Drand round data exists - let pulse = match pallet_drand::Pulses::::get(*reveal_round) { - Some(p) => p, - None => continue, - }; - - // Prepare the signature bytes - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - let sig_reader = &mut &signature_bytes[..]; - let sig = - ::SignatureGroup::deserialize_compressed(sig_reader) - .map_err(|e| { - log::warn!( - "Failed to deserialize drand signature for {:?}: {:?}", - who, - e + let original_fields = registration.info.fields.clone(); + let mut remain_fields = Vec::new(); + let mut revealed_fields = Vec::new(); + + for data in original_fields { + match data { + Data::TimelockEncrypted { + encrypted, + reveal_round, + } => { + let pulse = match pallet_drand::Pulses::::get(reveal_round) { + Some(p) => p, + None => { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + + let sig_reader = &mut &signature_bytes[..]; + let sig = + ::SignatureGroup::deserialize_compressed( + sig_reader, ) - }) - .ok(); - - let sig = match sig { - Some(s) => s, - None => continue, - }; + .map_err(|e| { + log::warn!( + "Failed to deserialize drand signature for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(sig) = sig else { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + }; + + let reader = &mut &encrypted[..]; + let commit = TLECiphertext::::deserialize_compressed(reader) + .map_err(|e| { + log::warn!( + "Failed to deserialize TLECiphertext for {:?}: {:?}", + who, + e + ) + }) + .ok(); + + let Some(commit) = commit else { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + }; + + let decrypted_bytes: Vec = + tld::(commit, sig) + .map_err(|e| { + log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) 
+ }) + .ok() + .unwrap_or_default(); + + if decrypted_bytes.is_empty() { + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + + let mut reader = &decrypted_bytes[..]; + let revealed_info: CommitmentInfo = + match Decode::decode(&mut reader) { + Ok(info) => info, + Err(e) => { + log::warn!( + "Failed to decode decrypted data for {:?}: {:?}", + who, + e + ); + remain_fields.push(Data::TimelockEncrypted { + encrypted, + reveal_round, + }); + continue; + } + }; + + revealed_fields.push(revealed_info); + } - // Attempt to deserialize the encrypted commitment - let reader = &mut &encrypted[..]; - let commit = TLECiphertext::::deserialize_compressed(reader) - .map_err(|e| { - log::warn!("Failed to deserialize TLECiphertext for {:?}: {:?}", who, e) - }) - .ok(); - - let commit = match commit { - Some(c) => c, - None => continue, - }; + other => remain_fields.push(other), + } + } - // Decrypt the timelock commitment - let decrypted_bytes: Vec = - tld::(commit, sig) - .map_err(|e| { - log::warn!("Failed to decrypt timelock for {:?}: {:?}", who, e) - }) - .ok() - .unwrap_or_default(); - - if decrypted_bytes.is_empty() { - continue; + if !revealed_fields.is_empty() { + let mut all_revealed_data = Vec::new(); + for info in revealed_fields { + all_revealed_data.extend(info.fields.into_inner()); } - // Decode the decrypted bytes into CommitmentInfo - let mut reader = &decrypted_bytes[..]; - let revealed_info: CommitmentInfo = match Decode::decode(&mut reader) - { - Ok(info) => info, - Err(e) => { - log::warn!("Failed to decode decrypted data for {:?}: {:?}", who, e); - continue; - } + let bounded_revealed = BoundedVec::try_from(all_revealed_data) + .map_err(|_| "Could not build BoundedVec for revealed fields")?; + + let combined_revealed_info = CommitmentInfo { + fields: bounded_revealed, }; - // Store the revealed data let revealed_data = RevealedData { - info: revealed_info, + info: combined_revealed_info, revealed_block: 
current_block, deposit: registration.deposit, }; >::insert(netuid, &who, revealed_data); - // Remove the TimelockEncrypted field from the original commitment - let filtered_fields: Vec = registration - .info - .fields - .into_iter() - .filter(|data| { - !matches!( - data, - Data::TimelockEncrypted { - reveal_round: r, .. - } if r == reveal_round - ) - }) - .collect(); - - registration.info.fields = BoundedVec::try_from(filtered_fields) - .map_err(|_| "Failed to filter timelock fields")?; - - Self::deposit_event(Event::CommitmentRevealed { netuid, who }); + let who_clone = who.clone(); + Self::deposit_event(Event::CommitmentRevealed { + netuid, + who: who_clone, + }); + } + + registration.info.fields = BoundedVec::try_from(remain_fields) + .map_err(|_| "Failed to build BoundedVec for remain_fields")?; + + match registration.info.fields.is_empty() { + true => >::remove(netuid, &who), + false => >::insert(netuid, who, registration), } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index c40c0b0427..4f308747f6 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -474,3 +474,197 @@ fn reveal_timelocked_commitment_decode_failure_is_skipped() { assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } + +#[test] +fn reveal_timelocked_commitment_single_field_entry_is_removed_after_reveal() { + new_test_ext().execute_with(|| { + let message_text = b"Single field timelock test!"; + let data_raw = Data::Raw( + message_text + .to_vec() + .try_into() + .expect("Message must be <=128 bytes for Raw variant"), + ); + + let fields_bounded: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![data_raw]).expect("BoundedVec creation must not fail"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_bounded, + }; + + let plaintext = inner_info.encode(); + let reveal_round = 1000; + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let timelock_data = 
Data::TimelockEncrypted { + encrypted, + reveal_round, + }; + let fields_outer: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![timelock_data]).expect("Too many fields"); + let info_outer: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: fields_outer, + }; + + let who = 555; + let netuid = 777; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + let drand_signature_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Must decode DRAND_QUICKNET_SIG_HEX successfully"); + insert_drand_pulse(reveal_round, &drand_signature_bytes); + + System::::set_block_number(9999); + assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + + let revealed = + RevealedCommitments::::get(netuid, who).expect("Expected to find revealed data"); + assert_eq!( + revealed.info.fields.len(), + 1, + "Should have exactly 1 revealed field" + ); + + assert!( + crate::CommitmentOf::::get(netuid, who).is_none(), + "Expected CommitmentOf entry to be removed after reveal" + ); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { + new_test_ext().execute_with(|| { + let round_1000 = 1000; + + // 2) Build two CommitmentInfos, one for each timelock + let msg_1 = b"Hello from TLE #1"; + let inner_1_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_1.to_vec().try_into().expect("expected not to panic"), + )]) + .expect("BoundedVec of size 1"); + let inner_info_1 = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let timelock_1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + let msg_2 = b"Hello from TLE #2"; + let inner_2_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + msg_2.to_vec().try_into().expect("expected not to panic"), 
+ )]) + .expect("BoundedVec of size 1"); + let inner_info_2 = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let ciphertext_2 = produce_ciphertext(&encoded_2, round_1000); + let timelock_2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_1000, + }; + + // 3) One plain Data::Raw field (non-timelocked) + let raw_bytes = b"Plain non-timelocked data"; + let data_raw = Data::Raw( + raw_bytes + .to_vec() + .try_into() + .expect("expected not to panic"), + ); + + // 4) Outer commitment: 3 fields total => [Raw, TLE #1, TLE #2] + let outer_fields = BoundedVec::try_from(vec![ + data_raw.clone(), + timelock_1.clone(), + timelock_2.clone(), + ]) + .expect("T::MaxFields >= 3 in the test config, or at least 3 here"); + let outer_info = CommitmentInfo { + fields: outer_fields, + }; + + // 5) Insert the commitment + let who = 123; + let netuid = 999; + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(outer_info) + )); + let initial = Pallet::::commitment_of(netuid, who).expect("Must exist"); + assert_eq!(initial.info.fields.len(), 3, "3 fields inserted"); + + // 6) Insert Drand signature for round=1000 + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("decode DRAND sig"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + // 7) Reveal once + System::::set_block_number(50); + assert_ok!(Pallet::::reveal_timelocked_commitments(50)); + + // => The pallet code has removed *both* TLE #1 and TLE #2 in this single call! 
+ let after_reveal = Pallet::::commitment_of(netuid, who) + .expect("Should still exist with leftover fields"); + // Only the raw, non-timelocked field remains + assert_eq!( + after_reveal.info.fields.len(), + 1, + "Both timelocks referencing round=1000 got removed at once" + ); + assert_eq!( + after_reveal.info.fields[0], data_raw, + "Only the raw field is left" + ); + + // 8) Check revealed data + let revealed_data = RevealedCommitments::::get(netuid, who) + .expect("Expected revealed data for TLE #1 and #2"); + + assert_eq!( + revealed_data.info.fields.len(), + 2, + "We revealed both TLE #1 and TLE #2 in the same pass" + ); + let mut found_msg1 = false; + let mut found_msg2 = false; + for item in &revealed_data.info.fields { + if let Data::Raw(bytes) = item { + if bytes.as_slice() == msg_1 { + found_msg1 = true; + } else if bytes.as_slice() == msg_2 { + found_msg2 = true; + } + } + } + assert!( + found_msg1 && found_msg2, + "Should see both TLE #1 and TLE #2 in the revealed data" + ); + + // 9) A second reveal call now does nothing, because no timelocks remain + System::::set_block_number(51); + assert_ok!(Pallet::::reveal_timelocked_commitments(51)); + + let after_second = Pallet::::commitment_of(netuid, who).expect("Still must exist"); + assert_eq!( + after_second.info.fields.len(), + 1, + "No new fields were removed, because no timelocks remain" + ); + }); +} From 937368a565f22ddc1c4f460d1fa8ae81baf712df Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 4 Mar 2025 13:22:04 -0800 Subject: [PATCH 013/121] only iterate over timelocked commitments in reveal --- pallets/commitments/src/lib.rs | 59 ++++++-- pallets/commitments/src/tests.rs | 242 ++++++++++++++++++++++++++++++- 2 files changed, 290 insertions(+), 11 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index e03fce646c..c7d66146c8 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ 
-17,6 +17,7 @@ pub use weights::WeightInfo; use ark_serialize::CanonicalDeserialize; use frame_support::{BoundedVec, traits::Currency}; +use scale_info::prelude::collections::BTreeSet; use sp_runtime::{Saturating, traits::Zero}; use sp_std::{boxed::Box, vec::Vec}; use tle::{ @@ -25,7 +26,6 @@ use tle::{ tlock::{TLECiphertext, tld}, }; use w3f_bls::EngineBLS; -use scale_info::prelude::collections::BTreeSet; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -120,6 +120,12 @@ pub mod pallet { #[pallet::storage] pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; + /// Tracks all CommitmentOf that have at least one timelocked field. + #[pallet::storage] + #[pallet::getter(fn timelocked_index)] + pub type TimelockedIndex = + StorageValue<_, BTreeSet<(u16, T::AccountId)>, ValueQuery>; + /// Identity data by account #[pallet::storage] #[pallet::getter(fn commitment_of)] @@ -225,11 +231,22 @@ pub mod pallet { { Self::deposit_event(Event::TimelockCommitment { netuid, - who, + who: who.clone(), reveal_round: *reveal_round, }); + + TimelockedIndex::::mutate(|index| { + index.insert((netuid, who.clone())); + }); } else { - Self::deposit_event(Event::Commitment { netuid, who }); + Self::deposit_event(Event::Commitment { + netuid, + who: who.clone(), + }); + + TimelockedIndex::::mutate(|index| { + index.remove(&(netuid, who.clone())); + }); } Ok(()) @@ -386,7 +403,15 @@ impl Pallet { .try_into() .map_err(|_| "Failed to convert u64 to BlockNumberFor")?; - for (netuid, who, mut registration) in >::iter() { + let index = TimelockedIndex::::get(); + for (netuid, who) in index.clone() { + let Some(mut registration) = >::get(netuid, &who) else { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + continue; + }; + let original_fields = registration.info.fields.clone(); let mut remain_fields = Vec::new(); let mut revealed_fields = Vec::new(); @@ -412,7 +437,6 @@ impl Pallet { .signature .strip_prefix(b"0x") 
.unwrap_or(&pulse.signature); - let sig_reader = &mut &signature_bytes[..]; let sig = ::SignatureGroup::deserialize_compressed( @@ -514,11 +538,9 @@ impl Pallet { deposit: registration.deposit, }; >::insert(netuid, &who, revealed_data); - - let who_clone = who.clone(); Self::deposit_event(Event::CommitmentRevealed { netuid, - who: who_clone, + who: who.clone(), }); } @@ -526,8 +548,25 @@ impl Pallet { .map_err(|_| "Failed to build BoundedVec for remain_fields")?; match registration.info.fields.is_empty() { - true => >::remove(netuid, &who), - false => >::insert(netuid, who, registration), + true => { + >::remove(netuid, &who); + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + false => { + >::insert(netuid, &who, ®istration); + let has_timelock = registration + .info + .fields + .iter() + .any(|f| matches!(f, Data::TimelockEncrypted { .. })); + if !has_timelock { + TimelockedIndex::::mutate(|idx| { + idx.remove(&(netuid, who.clone())); + }); + } + } } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 4f308747f6..c226c0eee3 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -3,7 +3,8 @@ use sp_std::prelude::*; #[cfg(test)] use crate::{ - CommitmentInfo, Config, Data, Error, Event, Pallet, RateLimit, RevealedCommitments, + CommitmentInfo, CommitmentOf, Config, Data, Error, Event, Pallet, RateLimit, + RevealedCommitments, TimelockedIndex, mock::{ DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, new_test_ext, produce_ciphertext, @@ -668,3 +669,242 @@ fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { ); }); } + +#[test] +fn test_index_lifecycle_no_timelocks_updates_in_out() { + new_test_ext().execute_with(|| { + let netuid = 100; + let who = 999; + + // + // A) Create a commitment with **no** timelocks => shouldn't be in index + // + let no_tl_fields: BoundedVec::MaxFields> = + 
BoundedVec::try_from(vec![]).expect("Empty is ok"); + let info_no_tl = CommitmentInfo { + fields: no_tl_fields, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_no_tl) + )); + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User with no timelocks must not appear in index" + ); + + // + // B) Update the commitment to have a timelock => enters index + // + let tl_fields: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted: Default::default(), + reveal_round: 1234, + }]) + .expect("Expected success"); + let info_with_tl = CommitmentInfo { fields: tl_fields }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_with_tl) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "User must appear in index after adding a timelock" + ); + + // + // C) Remove the timelock => leaves index + // + let back_to_no_tl: BoundedVec<_, ::MaxFields> = + BoundedVec::try_from(vec![]).expect("Expected success"); + let info_remove_tl = CommitmentInfo { + fields: back_to_no_tl, + }; + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_remove_tl) + )); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "User must be removed from index after losing all timelocks" + ); + }); +} + +#[test] +fn two_timelocks_partial_then_full_reveal() { + new_test_ext().execute_with(|| { + let netuid_a = 1; + let who_a = 10; + let round_1000 = 1000; + let round_2000 = 2000; + + let drand_sig_1000 = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected success"); + insert_drand_pulse(round_1000, &drand_sig_1000); + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + + // + // First Timelock => round=1000 + // + let msg_a1 = b"UserA timelock #1 (round=1000)"; + let inner_1_fields: BoundedVec::MaxFields> = 
BoundedVec::try_from( + vec![Data::Raw(msg_a1.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_1: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_1_fields, + }; + let encoded_1 = inner_info_1.encode(); + let ciphertext_1 = produce_ciphertext(&encoded_1, round_1000); + let tle_a1 = Data::TimelockEncrypted { + encrypted: ciphertext_1, + reveal_round: round_1000, + }; + + // + // Second Timelock => round=2000 + // + let msg_a2 = b"UserA timelock #2 (round=2000)"; + let inner_2_fields: BoundedVec::MaxFields> = BoundedVec::try_from( + vec![Data::Raw(msg_a2.to_vec().try_into().expect("Expected success"))], + ) + .expect("MaxFields >= 1"); + let inner_info_2: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_2_fields, + }; + let encoded_2 = inner_info_2.encode(); + let ciphertext_2 = produce_ciphertext(&encoded_2, round_2000); + let tle_a2 = Data::TimelockEncrypted { + encrypted: ciphertext_2, + reveal_round: round_2000, + }; + + // + // Insert outer commitment with both timelocks + // + let fields_a: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_a1, tle_a2]).expect("2 fields, must be <= MaxFields"); + let info_a: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_a }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_a), + netuid_a, + Box::new(info_a) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A must be in index with 2 timelocks" + ); + + System::::set_block_number(10); + assert_ok!(Pallet::::reveal_timelocked_commitments(10)); + + let leftover_a1 = CommitmentOf::::get(netuid_a, who_a).expect("still there"); + assert_eq!( + leftover_a1.info.fields.len(), + 1, + "Only the round=1000 timelock removed; round=2000 remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "Still in index with leftover timelock" + ); + + // + // Insert signature for round=2000 => final reveal => leftover=none => 
removed + // + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(11); + assert_ok!(Pallet::::reveal_timelocked_commitments(11)); + + let leftover_a2 = CommitmentOf::::get(netuid_a, who_a); + assert!( + leftover_a2.is_none(), + "All timelocks removed => none leftover" + ); + assert!( + !TimelockedIndex::::get().contains(&(netuid_a, who_a)), + "User A removed from index after final reveal" + ); + }); +} + +#[test] +fn single_timelock_reveal_later_round() { + new_test_ext().execute_with(|| { + let netuid_b = 2; + let who_b = 20; + let round_2000 = 2000; + + let drand_sig_2000_hex = + "b6cb8f482a0b15d45936a4c4ea08e98a087e71787caee3f4d07a8a9843b1bc5423c6b3c22f446488b3137eaca799c77e"; + let drand_sig_2000 = hex::decode(drand_sig_2000_hex).expect("Expected success"); + insert_drand_pulse(round_2000, &drand_sig_2000); + + let msg_b = b"UserB single timelock (round=2000)"; + + let inner_b_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw(msg_b.to_vec().try_into().expect("Expected success"))]) + .expect("MaxFields >= 1"); + let inner_info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_b_fields, + }; + let encoded_b = inner_info_b.encode(); + let ciphertext_b = produce_ciphertext(&encoded_b, round_2000); + let tle_b = Data::TimelockEncrypted { + encrypted: ciphertext_b, + reveal_round: round_2000, + }; + + let fields_b: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![tle_b]).expect("1 field"); + let info_b: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: fields_b }; + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who_b), + netuid_b, + Box::new(info_b) + )); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B in index" + ); + + // Remove the round=2000 signature so first reveal does nothing + pallet_drand::Pulses::::remove(round_2000); + + System::::set_block_number(20); + 
assert_ok!(Pallet::::reveal_timelocked_commitments(20)); + + let leftover_b1 = CommitmentOf::::get(netuid_b, who_b).expect("still there"); + assert_eq!( + leftover_b1.info.fields.len(), + 1, + "No signature => timelock remains" + ); + assert!( + TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "Still in index with leftover timelock" + ); + + insert_drand_pulse(round_2000, &drand_sig_2000); + + System::::set_block_number(21); + assert_ok!(Pallet::::reveal_timelocked_commitments(21)); + + let leftover_b2 = CommitmentOf::::get(netuid_b, who_b); + assert!(leftover_b2.is_none(), "Timelock removed => leftover=none"); + assert!( + !TimelockedIndex::::get().contains(&(netuid_b, who_b)), + "User B removed from index after final reveal" + ); + }); +} From 99b5a28b36a0e765a734b3202e80e9fd917079ca Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 6 Mar 2025 10:06:17 -0800 Subject: [PATCH 014/121] remove block_number param --- pallets/commitments/src/lib.rs | 7 ++---- pallets/commitments/src/tests.rs | 26 ++++++++++---------- pallets/subtensor/src/coinbase/block_step.rs | 2 +- pallets/subtensor/src/utils/misc.rs | 6 ++--- 4 files changed, 18 insertions(+), 23 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index c7d66146c8..850fe23e38 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -398,11 +398,8 @@ where } impl Pallet { - pub fn reveal_timelocked_commitments(current_block: u64) -> DispatchResult { - let current_block = current_block - .try_into() - .map_err(|_| "Failed to convert u64 to BlockNumberFor")?; - + pub fn reveal_timelocked_commitments() -> DispatchResult { + let current_block = >::block_number(); let index = TimelockedIndex::::get(); for (netuid, who) in index.clone() { let Some(mut registration) = >::get(netuid, &who) else { diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index c226c0eee3..bcb775f9ce 
100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -309,7 +309,7 @@ fn happy_path_timelock_commitments() { insert_drand_pulse(reveal_round, &drand_signature_bytes); System::::set_block_number(9999); - assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let revealed = RevealedCommitments::::get(netuid, who).expect("Should have revealed data"); @@ -350,7 +350,7 @@ fn reveal_timelocked_commitment_missing_round_does_nothing() { Box::new(info) )); System::::set_block_number(100_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(100_000)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } @@ -383,7 +383,7 @@ fn reveal_timelocked_commitment_cant_deserialize_ciphertext() { let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); insert_drand_pulse(1000, &sig_bytes); System::::set_block_number(99999); - assert_ok!(Pallet::::reveal_timelocked_commitments(99999)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } @@ -411,7 +411,7 @@ fn reveal_timelocked_commitment_bad_signature_skips_decryption() { let bad_signature = [0x33u8; 10]; insert_drand_pulse(1000, &bad_signature); System::::set_block_number(10_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } @@ -440,7 +440,7 @@ fn reveal_timelocked_commitment_empty_decrypted_data_is_skipped() { let sig_bytes = hex::decode(DRAND_QUICKNET_SIG_HEX).expect("Expected not to panic"); insert_drand_pulse(reveal_round, &sig_bytes); System::::set_block_number(10_000); - assert_ok!(Pallet::::reveal_timelocked_commitments(10_000)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); 
assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } @@ -471,7 +471,7 @@ fn reveal_timelocked_commitment_decode_failure_is_skipped() { hex::decode(DRAND_QUICKNET_SIG_HEX.as_bytes()).expect("Expected not to panic"); insert_drand_pulse(reveal_round, &sig_bytes); System::::set_block_number(9999); - assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); assert!(RevealedCommitments::::get(netuid, who).is_none()); }); } @@ -522,7 +522,7 @@ fn reveal_timelocked_commitment_single_field_entry_is_removed_after_reveal() { insert_drand_pulse(reveal_round, &drand_signature_bytes); System::::set_block_number(9999); - assert_ok!(Pallet::::reveal_timelocked_commitments(9999)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let revealed = RevealedCommitments::::get(netuid, who).expect("Expected to find revealed data"); @@ -616,7 +616,7 @@ fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { // 7) Reveal once System::::set_block_number(50); - assert_ok!(Pallet::::reveal_timelocked_commitments(50)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); // => The pallet code has removed *both* TLE #1 and TLE #2 in this single call! 
let after_reveal = Pallet::::commitment_of(netuid, who) @@ -659,7 +659,7 @@ fn reveal_timelocked_multiple_fields_only_correct_ones_removed() { // 9) A second reveal call now does nothing, because no timelocks remain System::::set_block_number(51); - assert_ok!(Pallet::::reveal_timelocked_commitments(51)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let after_second = Pallet::::commitment_of(netuid, who).expect("Still must exist"); assert_eq!( @@ -803,7 +803,7 @@ fn two_timelocks_partial_then_full_reveal() { ); System::::set_block_number(10); - assert_ok!(Pallet::::reveal_timelocked_commitments(10)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let leftover_a1 = CommitmentOf::::get(netuid_a, who_a).expect("still there"); assert_eq!( @@ -823,7 +823,7 @@ fn two_timelocks_partial_then_full_reveal() { insert_drand_pulse(round_2000, &drand_sig_2000); System::::set_block_number(11); - assert_ok!(Pallet::::reveal_timelocked_commitments(11)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let leftover_a2 = CommitmentOf::::get(netuid_a, who_a); assert!( @@ -882,7 +882,7 @@ fn single_timelock_reveal_later_round() { pallet_drand::Pulses::::remove(round_2000); System::::set_block_number(20); - assert_ok!(Pallet::::reveal_timelocked_commitments(20)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let leftover_b1 = CommitmentOf::::get(netuid_b, who_b).expect("still there"); assert_eq!( @@ -898,7 +898,7 @@ fn single_timelock_reveal_later_round() { insert_drand_pulse(round_2000, &drand_sig_2000); System::::set_block_number(21); - assert_ok!(Pallet::::reveal_timelocked_commitments(21)); + assert_ok!(Pallet::::reveal_timelocked_commitments()); let leftover_b2 = CommitmentOf::::get(netuid_b, who_b); assert!(leftover_b2.is_none(), "Timelock removed => leftover=none"); diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6943a02657..6b650b7615 100644 --- 
a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -20,7 +20,7 @@ impl Pallet { Self::try_set_pending_children(block_number); // --- 5. Unveil all matured timelocked entries - if let Err(e) = Self::reveal_timelocked_commitments(block_number) { + if let Err(e) = Self::reveal_timelocked_commitments() { log::debug!( "Failed to unveil matured commitments on block {} due to error: {:?}", block_number, diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 1983f1a168..3f6924faa3 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -769,9 +769,7 @@ impl Pallet { Self::deposit_event(Event::SubnetOwnerHotkeySet(netuid, hotkey.clone())); } - pub fn reveal_timelocked_commitments(block_number: u64) -> DispatchResult { - pallet_commitments::Pallet::::reveal_timelocked_commitments( - block_number, - ) + pub fn reveal_timelocked_commitments() -> DispatchResult { + pallet_commitments::Pallet::::reveal_timelocked_commitments() } } From 20bf5026dd0d6b1f2cb5e914c40bab15341a6f17 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 12 Mar 2025 09:49:52 -0700 Subject: [PATCH 015/121] fix merge --- pallets/subtensor/src/tests/mock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 78cfaaa15c..588fcab4db 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -409,6 +409,7 @@ impl crate::Config for Test { type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type CommitmentRuntime = Test; + type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; } parameter_types! { @@ -416,7 +417,6 @@ parameter_types! 
{ pub const CommitmentInitialDeposit: Balance = 0; pub const CommitmentFieldDeposit: Balance = 0; pub const CommitmentRateLimit: BlockNumber = 100; - type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; } pub struct OriginPrivilegeCmp; From 4b237eae1dacd46588c3252d7457a999ad86e3d0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:03:48 -0700 Subject: [PATCH 016/121] use space based rate limit per subnet tempo --- Cargo.lock | 2 +- pallets/admin-utils/src/tests/mock.rs | 38 +------- pallets/commitments/Cargo.toml | 11 ++- pallets/commitments/src/lib.rs | 98 ++++++++++++++++++-- pallets/commitments/src/mock.rs | 8 ++ pallets/commitments/src/types.rs | 10 ++ pallets/subtensor/Cargo.toml | 11 +-- pallets/subtensor/src/coinbase/block_step.rs | 9 -- pallets/subtensor/src/macros/config.rs | 3 - pallets/subtensor/src/tests/mock.rs | 36 ------- pallets/subtensor/src/utils/misc.rs | 4 - runtime/src/lib.rs | 15 ++- 12 files changed, 133 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3cb5acc976..57227ec937 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6099,6 +6099,7 @@ dependencies = [ "log", "pallet-balances", "pallet-drand", + "pallet-subtensor", "parity-scale-codec", "rand_chacha", "scale-info", @@ -6461,7 +6462,6 @@ dependencies = [ "num-traits", "pallet-balances", "pallet-collective", - "pallet-commitments", "pallet-drand", "pallet-membership", "pallet-preimage", diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 858570a165..fc0d016198 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -7,11 +7,10 @@ use frame_support::{ }; use frame_system as system; use frame_system::{EnsureNever, EnsureRoot, limits}; -use scale_info::TypeInfo; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; -use 
sp_core::{ConstU64, Decode, Encode, Get, H256}; +use sp_core::{ConstU64, H256}; use sp_runtime::{ BuildStorage, KeyTypeId, Perbill, testing::TestXt, @@ -33,7 +32,6 @@ frame_support::construct_runtime!( Drand: pallet_drand::{Pallet, Call, Storage, Event} = 6, Grandpa: pallet_grandpa = 7, EVMChainId: pallet_evm_chain_id = 8, - Commitments: pallet_commitments::{Pallet, Call, Storage, Event} = 9, } ); @@ -200,7 +198,6 @@ impl pallet_subtensor::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; - type CommitmentRuntime = Test; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; } @@ -378,39 +375,6 @@ where type OverarchingCall = RuntimeCall; } -parameter_types! { - pub const MaxCommitFieldsInner: u32 = 1; - pub const CommitmentInitialDeposit: Balance = 0; - pub const CommitmentFieldDeposit: Balance = 0; - pub const CommitmentRateLimit: BlockNumber = 100; -} - -#[subtensor_macros::freeze_struct("7c76bd954afbb54e")] -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] -pub struct MaxCommitFields; -impl Get for MaxCommitFields { - fn get() -> u32 { - MaxCommitFieldsInner::get() - } -} - -pub struct AllowCommitments; -impl pallet_commitments::CanCommit for AllowCommitments { - fn can_commit(_netuid: u16, _address: &AccountId) -> bool { - true - } -} - -impl pallet_commitments::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = pallet_commitments::weights::SubstrateWeight; - type CanCommit = AllowCommitments; - type MaxFields = MaxCommitFields; - type InitialDeposit = CommitmentInitialDeposit; - type FieldDeposit = CommitmentFieldDeposit; - type DefaultRateLimit = CommitmentRateLimit; -} // Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { sp_tracing::try_init_simple(); diff --git a/pallets/commitments/Cargo.toml b/pallets/commitments/Cargo.toml index fb0091debf..7b2f49ace8 100644 --- a/pallets/commitments/Cargo.toml +++ b/pallets/commitments/Cargo.toml @@ -39,6 +39,8 @@ sha2 = { workspace = true } log = { workspace = true } +pallet-subtensor = { path = "../subtensor", default-features = false } + [dev-dependencies] sp-core = { workspace = true } sp-io = { workspace = true } @@ -65,7 +67,8 @@ std = [ "w3f-bls/std", "hex/std", "rand_chacha/std", - "sha2/std" + "sha2/std", + "pallet-subtensor/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -73,12 +76,14 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "pallet-balances/runtime-benchmarks", - "pallet-drand/runtime-benchmarks" + "pallet-drand/runtime-benchmarks", + "pallet-subtensor/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", "sp-runtime/try-runtime", - "pallet-drand/try-runtime" + "pallet-drand/try-runtime", + "pallet-subtensor/try-runtime" ] diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 850fe23e38..8fa467f7cf 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -70,6 +70,15 @@ pub mod pallet { /// The rate limit for commitments #[pallet::constant] type DefaultRateLimit: Get>; + + /// Used to retrieve the given subnet's tempo + type TempoInterface: GetTempoInterface; + } + + /// Used to retrieve the given subnet's tempo + pub trait GetTempoInterface { + /// Used to retrieve the given subnet's tempo + fn get_tempo_for_netuid(netuid: u16) -> u16; } #[pallet::event] @@ -108,6 +117,8 @@ pub mod pallet { AccountNotAllowedCommit, /// Account is trying to commit data too fast, rate limit exceeded CommitmentSetRateLimitExceeded, /// Space Limit Exceeded for the current interval + 
SpaceLimitExceeded, } #[pallet::type_value] @@ -162,15 +173,39 @@ pub mod pallet { OptionQuery, >; + /// Maps (netuid, who) -> usage (how many “bytes” they've committed) + /// in the RateLimit window + #[pallet::storage] + #[pallet::getter(fn used_space_of)] + pub type UsedSpaceOf = StorageDoubleMap< + _, + Identity, + u16, + Twox64Concat, + T::AccountId, + UsageTracker>, + OptionQuery, + >; + + #[pallet::type_value] + /// The default Maximum Space + pub fn DefaultMaxSpace() -> u32 { + 3100 + } + + #[pallet::storage] + #[pallet::getter(fn max_space_per_user_per_rate_limit)] + pub type MaxSpace = StorageValue<_, u32, ValueQuery, DefaultMaxSpace>; + #[pallet::call] impl Pallet { /// Set the commitment for a given netuid #[pallet::call_index(0)] #[pallet::weight(( - ::WeightInfo::set_commitment(), - DispatchClass::Operational, - Pays::No - ))] + ::WeightInfo::set_commitment(), + DispatchClass::Operational, + Pays::No + ))] pub fn set_commitment( origin: OriginFor, netuid: u16, @@ -189,14 +224,26 @@ pub mod pallet { ); let cur_block = >::block_number(); - if let Some(last_commit) = >::get(netuid, &who) { - ensure!( - cur_block >= last_commit.saturating_add(RateLimit::::get()), - Error::::CommitmentSetRateLimitExceeded - ); + + let required_space = info.using_encoded(|b| b.len()) as u64; + + let mut usage = UsedSpaceOf::::get(netuid, &who).unwrap_or_default(); + let tempo_length = T::TempoInterface::get_tempo_for_netuid(netuid); + + if cur_block.saturating_sub(usage.last_reset_block) >= tempo_length.into() { + usage.last_reset_block = cur_block; + usage.used_space = 0; } - let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); + let max_allowed = MaxSpace::::get() as u64; + ensure!( + usage.used_space + required_space <= max_allowed, + Error::::SpaceLimitExceeded + ); + + usage.used_space += required_space; + UsedSpaceOf::::insert(netuid, &who, usage); + let mut id = match >::get(netuid, &who) { Some(mut id) => { id.info = *info.clone(); @@ -211,6 +258,7 
@@ pub mod pallet { }; let old_deposit = id.deposit; + let fd = >::from(extra_fields).saturating_mul(T::FieldDeposit::get()); id.deposit = T::InitialDeposit::get().saturating_add(fd); if id.deposit > old_deposit { T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; @@ -264,6 +312,36 @@ pub mod pallet { RateLimit::::set(rate_limit_blocks.into()); Ok(()) } + + /// Sudo-set MaxSpace + #[pallet::call_index(2)] + #[pallet::weight(( + ::WeightInfo::set_rate_limit(), + DispatchClass::Operational, + Pays::No + ))] + pub fn set_max_space_per_user_per_rate_limit( + origin: OriginFor, + new_limit: u32, + ) -> DispatchResult { + ensure_root(origin)?; + MaxSpace::::set(new_limit); + Ok(()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + if let Err(e) = Self::reveal_timelocked_commitments() { + log::debug!( + "Failed to unveil matured commitments on block {:?}: {:?}", + n, + e + ); + } + Weight::from_parts(0, 0) + } } } diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 5a4b3cace5..88885a073d 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -101,6 +101,14 @@ impl pallet_commitments::Config for Test { type FieldDeposit = ConstU64<0>; type InitialDeposit = ConstU64<0>; type DefaultRateLimit = ConstU64<0>; + type TempoInterface = MockTempoInterface; +} + +pub struct MockTempoInterface; +impl pallet_commitments::GetTempoInterface for MockTempoInterface { + fn get_tempo_for_netuid(_netuid: u16) -> u16 { + 360 + } } impl pallet_drand::Config for Test { diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 400c102443..735d085a93 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -346,6 +346,16 @@ pub struct RevealedData, BlockNumber> { pub deposit: Balance, } +/// Tracks how much “space” each (netuid, who) has used within the current RateLimit block-window. 
+#[freeze_struct("c73c7815f7c51556")] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, TypeInfo)] +pub struct UsageTracker { + /// Last Reset block + pub last_reset_block: BlockNumber, + /// Space used + pub used_space: u64, +} + /// Information concerning the identity of the controller of an account. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index 42b468b1d7..f240245c47 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -56,8 +56,6 @@ w3f-bls = { workspace = true, default-features = false } sha2 = { workspace = true } rand_chacha = { workspace = true } -pallet-commitments = { default-features = false, path = "../commitments" } - [dev-dependencies] pallet-balances = { workspace = true, features = ["std"] } pallet-scheduler = { workspace = true } @@ -109,8 +107,7 @@ std = [ "rand_chacha/std", "safe-math/std", "sha2/std", - "share-pool/std", - "pallet-commitments/std" + "share-pool/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -123,8 +120,7 @@ runtime-benchmarks = [ "pallet-collective/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", - "pallet-drand/runtime-benchmarks", - "pallet-commitments/runtime-benchmarks" + "pallet-drand/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", @@ -137,8 +133,7 @@ try-runtime = [ "pallet-utility/try-runtime", "sp-runtime/try-runtime", "pallet-collective/try-runtime", - "pallet-drand/try-runtime", - "pallet-commitments/try-runtime" + "pallet-drand/try-runtime" ] pow-faucet = [] fast-blocks = [] diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6b650b7615..2eb7ec2fb4 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -19,15 +19,6 @@ impl Pallet { // --- 4. 
Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); - // --- 5. Unveil all matured timelocked entries - if let Err(e) = Self::reveal_timelocked_commitments() { - log::debug!( - "Failed to unveil matured commitments on block {} due to error: {:?}", - block_number, - e - ); - } - // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 17d5b93457..af448c8771 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -47,9 +47,6 @@ mod config { /// the preimage to store the call data. type Preimages: QueryPreimage + StorePreimage; - /// The commitment pallet's runtime - type CommitmentRuntime: pallet_commitments::Config; - /// ================================= /// ==== Initial Value Constants ==== /// ================================= diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 588fcab4db..0d979a6126 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -38,7 +38,6 @@ frame_support::construct_runtime!( Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 9, Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 10, Drand: pallet_drand::{Pallet, Call, Storage, Event} = 11, - Commitments: pallet_commitments::{Pallet, Call, Storage, Event} = 12, } ); @@ -408,17 +407,9 @@ impl crate::Config for Test { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; - type CommitmentRuntime = Test; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; } -parameter_types! 
{ - pub const MaxCommitFieldsInner: u32 = 1; - pub const CommitmentInitialDeposit: Balance = 0; - pub const CommitmentFieldDeposit: Balance = 0; - pub const CommitmentRateLimit: BlockNumber = 100; -} - pub struct OriginPrivilegeCmp; impl PrivilegeCmp for OriginPrivilegeCmp { @@ -540,33 +531,6 @@ impl frame_system::offchain::CreateSignedTransaction> f } } -#[freeze_struct("7c76bd954afbb54e")] -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] -pub struct MaxCommitFields; -impl Get for MaxCommitFields { - fn get() -> u32 { - MaxCommitFieldsInner::get() - } -} - -pub struct AllowCommitments; -impl pallet_commitments::CanCommit for AllowCommitments { - fn can_commit(_netuid: u16, _address: &AccountId) -> bool { - true - } -} - -impl pallet_commitments::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = pallet_commitments::weights::SubstrateWeight; - type CanCommit = AllowCommitments; - type MaxFields = MaxCommitFields; - type InitialDeposit = CommitmentInitialDeposit; - type FieldDeposit = CommitmentFieldDeposit; - type DefaultRateLimit = CommitmentRateLimit; -} - #[allow(dead_code)] // Build genesis storage according to the mock runtime. pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities { diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index d8481a002c..19d07248d2 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -734,10 +734,6 @@ impl Pallet { Self::deposit_event(Event::SubnetOwnerHotkeySet(netuid, hotkey.clone())); } - pub fn reveal_timelocked_commitments() -> DispatchResult { - pallet_commitments::Pallet::::reveal_timelocked_commitments() - } - // Get the uid of the Owner Hotkey for a subnet. 
pub fn get_owner_uid(netuid: u16) -> Option { match SubnetOwnerHotkey::::try_get(netuid) { diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index f8bc81f383..db151817d3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -960,6 +960,20 @@ impl pallet_commitments::Config for Runtime { type InitialDeposit = CommitmentInitialDeposit; type FieldDeposit = CommitmentFieldDeposit; type DefaultRateLimit = CommitmentRateLimit; + type TempoInterface = TempoInterface; +} + +pub struct TempoInterface; +impl pallet_commitments::GetTempoInterface for TempoInterface { + fn get_tempo_for_netuid(_netuid: u16) -> u16 { + 360 + } +} + +impl pallet_commitments::GetTempoInterface for Runtime { + fn get_tempo_for_netuid(netuid: u16) -> u16 { + pallet_subtensor::Tempo::::get(netuid) + } } #[cfg(not(feature = "fast-blocks"))] @@ -1093,7 +1107,6 @@ impl pallet_subtensor::Config for Runtime { type Preimages = Preimage; type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; - type CommitmentRuntime = Runtime; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; } From 5e70dc1fc770e41841f0448d9373d1e6d1d1aadb Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:08:55 -0700 Subject: [PATCH 017/121] clippy --- pallets/commitments/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 8fa467f7cf..9a5f4fa75d 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -237,11 +237,12 @@ pub mod pallet { let max_allowed = MaxSpace::::get() as u64; ensure!( - usage.used_space + required_space <= max_allowed, + usage.used_space.saturating_add(required_space) <= max_allowed, Error::::SpaceLimitExceeded ); - usage.used_space += required_space; + usage.used_space = 
usage.used_space.saturating_add(required_space); + UsedSpaceOf::::insert(netuid, &who, usage); let mut id = match >::get(netuid, &who) { From b441d87b59b3e6ab5940cf0834593b32e4d44842 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:52:56 -0700 Subject: [PATCH 018/121] fix `required_space` --- pallets/commitments/src/lib.rs | 6 +++++- pallets/commitments/src/types.rs | 12 ++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 9a5f4fa75d..81fd0ed64b 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -225,7 +225,11 @@ pub mod pallet { let cur_block = >::block_number(); - let required_space = info.using_encoded(|b| b.len()) as u64; + let required_space: u64 = info + .fields + .iter() + .map(|field| field.len_for_rate_limit()) + .sum(); let mut usage = UsedSpaceOf::::get(netuid, &who).unwrap_or_default(); let tempo_length = T::TempoInterface::get_tempo_for_netuid(netuid); diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 735d085a93..e90b8ade3f 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -69,6 +69,18 @@ impl Data { pub fn is_timelock_encrypted(&self) -> bool { matches!(self, Data::TimelockEncrypted { .. }) } + + pub fn len_for_rate_limit(&self) -> u64 { + match self { + Data::None => 0, + Data::Raw(bytes) => bytes.len() as u64, + Data::BlakeTwo256(arr) + | Data::Sha256(arr) + | Data::Keccak256(arr) + | Data::ShaThree256(arr) => arr.len() as u64, + Data::TimelockEncrypted { encrypted, .. 
} => encrypted.len() as u64, + } + } } impl Decode for Data { From c94ea1fe8f1927c14cbbb71b1cea039268a11676 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:53:05 -0700 Subject: [PATCH 019/121] add tests --- pallets/commitments/src/tests.rs | 282 +++++++++++++++++++++++++++---- 1 file changed, 250 insertions(+), 32 deletions(-) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index bcb775f9ce..971fa66ad9 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -3,7 +3,7 @@ use sp_std::prelude::*; #[cfg(test)] use crate::{ - CommitmentInfo, CommitmentOf, Config, Data, Error, Event, Pallet, RateLimit, + CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, RevealedCommitments, TimelockedIndex, mock::{ DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, @@ -146,37 +146,38 @@ fn set_commitment_too_many_fields_panics() { }); } -#[test] -fn set_commitment_rate_limit_exceeded() { - new_test_ext().execute_with(|| { - let rate_limit = ::DefaultRateLimit::get(); - System::::set_block_number(1); - let info = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), - }); - - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info.clone() - )); - - // Set block number to just before rate limit expires - System::::set_block_number(rate_limit); - assert_noop!( - Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), - Error::::CommitmentSetRateLimitExceeded - ); - - // Set block number to after rate limit - System::::set_block_number(rate_limit + 1); - assert_ok!(Pallet::::set_commitment( - RuntimeOrigin::signed(1), - 1, - info - )); - }); -} +// DEPRECATED +// #[test] +// fn set_commitment_rate_limit_exceeded() { +// new_test_ext().execute_with(|| { +// let rate_limit = ::DefaultRateLimit::get(); +// 
System::::set_block_number(1); +// let info = Box::new(CommitmentInfo { +// fields: BoundedVec::try_from(vec![]).expect("Expected not to panic"), +// }); + +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info.clone() +// )); + +// // Set block number to just before rate limit expires +// System::::set_block_number(rate_limit); +// assert_noop!( +// Pallet::::set_commitment(RuntimeOrigin::signed(1), 1, info.clone()), +// Error::::CommitmentSetRateLimitExceeded +// ); + +// // Set block number to after rate limit +// System::::set_block_number(rate_limit + 1); +// assert_ok!(Pallet::::set_commitment( +// RuntimeOrigin::signed(1), +// 1, +// info +// )); +// }); +// } #[test] fn set_commitment_updates_deposit() { @@ -908,3 +909,220 @@ fn single_timelock_reveal_later_round() { ); }); } + +#[test] +fn tempo_based_space_limit_accumulates_in_same_window() { + new_test_ext().execute_with(|| { + let netuid = 1; + let who = 100; + let space_limit = 50; + MaxSpace::::set(space_limit); + System::::set_block_number(0); + + // A single commitment that uses some space, e.g. 30 bytes: + let data = vec![0u8; 30]; + let info = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw( + data.try_into().expect("Data up to 128 bytes OK"), + )]) + .expect("1 field is <= MaxFields"), + }); + + // 2) First call => usage=0 => usage=30 after. OK. + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + info.clone(), + )); + + // 3) Second call => tries another 30 bytes in the SAME block => total=60 => exceeds 50 => should fail. 
+ assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, info.clone()), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_resets_after_tempo() { + new_test_ext().execute_with(|| { + let netuid = 2; + let who = 101; + + MaxSpace::::set(40); + System::::set_block_number(1); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) + .unwrap(), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(200); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + System::::set_block_number(400); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_small + )); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_netuid() { + new_test_ext().execute_with(|| { + let netuid_a = 10; + let netuid_b = 20; + let who = 111; + let space_limit = 50; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 40].try_into().unwrap())]) + .unwrap(), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) + .unwrap(), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_a, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_a, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + 
assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid_b, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid_b, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_does_not_affect_different_user() { + new_test_ext().execute_with(|| { + let netuid = 10; + let user1 = 123; + let user2 = 456; + let space_limit = 50; + MaxSpace::::set(space_limit); + + let commit_large = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 40].try_into().unwrap())]) + .unwrap(), + }); + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) + .unwrap(), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_large.clone() + )); + + assert_noop!( + Pallet::::set_commitment( + RuntimeOrigin::signed(user1), + netuid, + commit_small.clone() + ), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(user2), + netuid, + commit_large + )); + + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(user2), netuid, commit_small), + Error::::SpaceLimitExceeded + ); + }); +} + +#[test] +fn tempo_based_space_limit_sudo_set_max_space() { + new_test_ext().execute_with(|| { + let netuid = 3; + let who = 15; + MaxSpace::::set(30); + + System::::set_block_number(1); + let commit_25 = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 25].try_into().unwrap())]) + .unwrap(), + }); + + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + commit_25.clone() + )); + assert_noop!( + Pallet::::set_commitment(RuntimeOrigin::signed(who), netuid, commit_25.clone()), + Error::::SpaceLimitExceeded + ); + + assert_ok!(Pallet::::set_max_space_per_user_per_rate_limit( + RuntimeOrigin::root(), + 100 + )); + + assert_ok!(Pallet::::set_commitment( + 
RuntimeOrigin::signed(who), + netuid, + commit_25 + )); + }); +} From 501baa2eaaf11cf706663cdfe215901d4fb81628 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:56:03 -0700 Subject: [PATCH 020/121] clippy --- pallets/commitments/src/tests.rs | 36 +++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 971fa66ad9..9460b2c691 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -953,8 +953,10 @@ fn tempo_based_space_limit_resets_after_tempo() { System::::set_block_number(1); let commit_small = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) - .unwrap(), + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); assert_ok!(Pallet::::set_commitment( @@ -1009,12 +1011,16 @@ fn tempo_based_space_limit_does_not_affect_different_netuid() { MaxSpace::::set(space_limit); let commit_large = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 40].try_into().unwrap())]) - .unwrap(), + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); let commit_small = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) - .unwrap(), + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); assert_ok!(Pallet::::set_commitment( @@ -1055,12 +1061,16 @@ fn tempo_based_space_limit_does_not_affect_different_user() { MaxSpace::::set(space_limit); let commit_large = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 40].try_into().unwrap())]) - .unwrap(), + fields: 
BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 40].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); let commit_small = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 20].try_into().unwrap())]) - .unwrap(), + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 20].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); assert_ok!(Pallet::::set_commitment( @@ -1100,8 +1110,10 @@ fn tempo_based_space_limit_sudo_set_max_space() { System::::set_block_number(1); let commit_25 = Box::new(CommitmentInfo { - fields: BoundedVec::try_from(vec![Data::Raw(vec![0u8; 25].try_into().unwrap())]) - .unwrap(), + fields: BoundedVec::try_from(vec![Data::Raw( + vec![0u8; 25].try_into().expect("expected ok"), + )]) + .expect("expected ok"), }); assert_ok!(Pallet::::set_commitment( From 2376741b05312c2f55cf84a4930d7ddbeb6846f6 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 11:59:17 -0700 Subject: [PATCH 021/121] revert unneeded --- Cargo.lock | 1 - pallets/admin-utils/Cargo.toml | 10 +++------- pallets/drand/src/lib.rs | 2 +- pallets/subtensor/src/coinbase/block_step.rs | 1 - 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57227ec937..51a10fdf02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5991,7 +5991,6 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "pallet-commitments", "pallet-drand", "pallet-evm-chain-id", "pallet-grandpa", diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index d8a413977f..587f9daa32 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -32,7 +32,6 @@ substrate-fixed = { workspace = true } pallet-evm-chain-id = { workspace = true } pallet-drand = { workspace = true, default-features = false } sp-consensus-grandpa = { workspace = true } -pallet-commitments = { default-features = false, path = "../commitments" } 
[dev-dependencies] sp-core = { workspace = true } @@ -67,8 +66,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", - "substrate-fixed/std", - "pallet-commitments/std" + "substrate-fixed/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -79,8 +77,7 @@ runtime-benchmarks = [ "pallet-grandpa/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-subtensor/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "pallet-commitments/runtime-benchmarks" + "sp-runtime/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", @@ -91,6 +88,5 @@ try-runtime = [ "pallet-grandpa/try-runtime", "pallet-scheduler/try-runtime", "pallet-subtensor/try-runtime", - "sp-runtime/try-runtime", - "pallet-commitments/try-runtime" + "sp-runtime/try-runtime" ] diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index d9da288212..40bf7ccb9b 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -219,7 +219,7 @@ pub mod pallet { pub type Pulses = StorageMap<_, Blake2_128Concat, RoundNumber, Pulse, OptionQuery>; #[pallet::storage] - pub type LastStoredRound = StorageValue<_, RoundNumber, ValueQuery>; + pub(super) type LastStoredRound = StorageValue<_, RoundNumber, ValueQuery>; /// Defines the block when next unsigned transaction will be accepted. /// diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 2eb7ec2fb4..669f8e09da 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -18,7 +18,6 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); - // Return ok. 
Ok(()) } From a0b33c7dcfc1e04e351de1a9c2cefa9c2706fe75 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 13 Mar 2025 12:00:46 -0700 Subject: [PATCH 022/121] revert unneeded change --- pallets/admin-utils/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index 587f9daa32..b3c1410cca 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -66,7 +66,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", - "substrate-fixed/std" + "substrate-fixed/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -77,7 +77,7 @@ runtime-benchmarks = [ "pallet-grandpa/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-subtensor/runtime-benchmarks", - "sp-runtime/runtime-benchmarks" + "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", @@ -88,5 +88,5 @@ try-runtime = [ "pallet-grandpa/try-runtime", "pallet-scheduler/try-runtime", "pallet-subtensor/try-runtime", - "sp-runtime/try-runtime" + "sp-runtime/try-runtime", ] From c1236c6f061e7ad1c598eb76375e743d99474f30 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 14 Mar 2025 09:54:11 -0700 Subject: [PATCH 023/121] rename set_max_space --- pallets/commitments/src/lib.rs | 5 +---- pallets/commitments/src/tests.rs | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 81fd0ed64b..246073134a 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -325,10 +325,7 @@ pub mod pallet { DispatchClass::Operational, Pays::No ))] - pub fn set_max_space_per_user_per_rate_limit( - origin: OriginFor, - new_limit: u32, - ) -> DispatchResult { + pub fn set_max_space(origin: OriginFor, new_limit: u32) -> DispatchResult { ensure_root(origin)?; 
MaxSpace::::set(new_limit); Ok(()) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 9460b2c691..f07b4ed687 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1126,10 +1126,7 @@ fn tempo_based_space_limit_sudo_set_max_space() { Error::::SpaceLimitExceeded ); - assert_ok!(Pallet::::set_max_space_per_user_per_rate_limit( - RuntimeOrigin::root(), - 100 - )); + assert_ok!(Pallet::::set_max_space(RuntimeOrigin::root(), 100)); assert_ok!(Pallet::::set_commitment( RuntimeOrigin::signed(who), From 2b9a90f4de685157f3113d0241b801e0425e4803 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 14 Mar 2025 10:24:40 -0700 Subject: [PATCH 024/121] add test for reveal in blockstep --- pallets/commitments/src/tests.rs | 83 ++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index f07b4ed687..0ce611122a 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -12,6 +12,7 @@ use crate::{ }; use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; use frame_system::Pallet as System; +use frame_support::pallet_prelude::Hooks; #[allow(clippy::indexing_slicing)] #[test] @@ -1135,3 +1136,85 @@ fn tempo_based_space_limit_sudo_set_max_space() { )); }); } + +#[allow(clippy::indexing_slicing)] +#[test] +fn on_initialize_reveals_matured_timelocks() { + new_test_ext().execute_with(|| { + let who = 42; + let netuid = 7; + let reveal_round = 1000; + + let message_text = b"Timelock test via on_initialize"; + + let inner_fields: BoundedVec::MaxFields> = BoundedVec::try_from(vec![Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes is OK for Data::Raw"), + )]) + .expect("Should not exceed MaxFields"); + + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: inner_fields, + }; + + let plaintext = 
inner_info.encode(); + let encrypted = produce_ciphertext(&plaintext, reveal_round); + + let outer_fields = BoundedVec::try_from(vec![Data::TimelockEncrypted { + encrypted, + reveal_round, + }]) + .expect("One field is well under MaxFields"); + let info_outer = CommitmentInfo { fields: outer_fields }; + + System::::set_block_number(1); + assert_ok!(Pallet::::set_commitment( + RuntimeOrigin::signed(who), + netuid, + Box::new(info_outer) + )); + + + assert!(CommitmentOf::::get(netuid, who).is_some()); + assert!( + TimelockedIndex::::get().contains(&(netuid, who)), + "Should appear in TimelockedIndex since it contains a timelock" + ); + + let drand_sig_hex = hex::decode(DRAND_QUICKNET_SIG_HEX) + .expect("Decoding DRAND_QUICKNET_SIG_HEX must not fail"); + insert_drand_pulse(reveal_round, &drand_sig_hex); + + assert!(RevealedCommitments::::get(netuid, who).is_none()); + + System::::set_block_number(2); + as Hooks>::on_initialize(2); + + let revealed_opt = RevealedCommitments::::get(netuid, who); + assert!( + revealed_opt.is_some(), + "Expected that the timelock got revealed at block #2" + ); + + let leftover = CommitmentOf::::get(netuid, who); + assert!( + leftover.is_none(), + "After revealing the only timelock, the entire commitment is removed." + ); + + assert!( + !TimelockedIndex::::get().contains(&(netuid, who)), + "No longer in TimelockedIndex after reveal." 
+ ); + + let revealed_data = revealed_opt.expect("expected to not panic"); + assert_eq!(revealed_data.info.fields.len(), 1); + if let Data::Raw(bound_bytes) = &revealed_data.info.fields[0] { + assert_eq!(bound_bytes.as_slice(), message_text); + } else { + panic!("Expected a Data::Raw variant in revealed data."); + } + }); +} From a245655aec62a5de02e1fb6063cece1411abbfb3 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 14 Mar 2025 10:27:54 -0700 Subject: [PATCH 025/121] fmt --- pallets/commitments/src/tests.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 0ce611122a..9de87a8b58 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -10,9 +10,9 @@ use crate::{ new_test_ext, produce_ciphertext, }, }; +use frame_support::pallet_prelude::Hooks; use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; use frame_system::Pallet as System; -use frame_support::pallet_prelude::Hooks; #[allow(clippy::indexing_slicing)] #[test] @@ -1147,17 +1147,18 @@ fn on_initialize_reveals_matured_timelocks() { let message_text = b"Timelock test via on_initialize"; - let inner_fields: BoundedVec::MaxFields> = BoundedVec::try_from(vec![Data::Raw( - message_text - .to_vec() - .try_into() - .expect("<= 128 bytes is OK for Data::Raw"), - )]) - .expect("Should not exceed MaxFields"); - + let inner_fields: BoundedVec::MaxFields> = + BoundedVec::try_from(vec![Data::Raw( + message_text + .to_vec() + .try_into() + .expect("<= 128 bytes is OK for Data::Raw"), + )]) + .expect("Should not exceed MaxFields"); + let inner_info: CommitmentInfo<::MaxFields> = CommitmentInfo { fields: inner_fields, - }; + }; let plaintext = inner_info.encode(); let encrypted = produce_ciphertext(&plaintext, reveal_round); @@ -1167,7 +1168,9 @@ fn on_initialize_reveals_matured_timelocks() { reveal_round, }]) 
.expect("One field is well under MaxFields"); - let info_outer = CommitmentInfo { fields: outer_fields }; + let info_outer = CommitmentInfo { + fields: outer_fields, + }; System::::set_block_number(1); assert_ok!(Pallet::::set_commitment( @@ -1176,7 +1179,6 @@ fn on_initialize_reveals_matured_timelocks() { Box::new(info_outer) )); - assert!(CommitmentOf::::get(netuid, who).is_some()); assert!( TimelockedIndex::::get().contains(&(netuid, who)), From b0eb8bb698382ab6b07715fd82f6f79e12091b14 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 14 Mar 2025 12:36:42 -0700 Subject: [PATCH 026/121] commitments data limit follows tempo exactly --- pallets/commitments/src/lib.rs | 23 +++++++++-------------- pallets/commitments/src/mock.rs | 4 ++-- pallets/commitments/src/tests.rs | 4 +++- pallets/commitments/src/types.rs | 8 ++++---- runtime/src/lib.rs | 8 ++++---- 5 files changed, 22 insertions(+), 25 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 246073134a..a136a2a7e5 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -18,6 +18,7 @@ pub use weights::WeightInfo; use ark_serialize::CanonicalDeserialize; use frame_support::{BoundedVec, traits::Currency}; use scale_info::prelude::collections::BTreeSet; +use sp_runtime::SaturatedConversion; use sp_runtime::{Saturating, traits::Zero}; use sp_std::{boxed::Box, vec::Vec}; use tle::{ @@ -77,8 +78,8 @@ pub mod pallet { /// Used to retreive the given subnet's tempo pub trait GetTempoInterface { - /// Used to retreive the given subnet's tempo - fn get_tempo_for_netuid(netuid: u16) -> u16; + /// Used to retreive the epoch index for the given subnet. 
+ fn get_epoch_index(netuid: u16, cur_block: u64) -> u64; } #[pallet::event] @@ -177,15 +178,8 @@ pub mod pallet { /// in the RateLimit window #[pallet::storage] #[pallet::getter(fn used_space_of)] - pub type UsedSpaceOf = StorageDoubleMap< - _, - Identity, - u16, - Twox64Concat, - T::AccountId, - UsageTracker>, - OptionQuery, - >; + pub type UsedSpaceOf = + StorageDoubleMap<_, Identity, u16, Twox64Concat, T::AccountId, UsageTracker, OptionQuery>; #[pallet::type_value] /// The default Maximum Space @@ -232,10 +226,11 @@ pub mod pallet { .sum(); let mut usage = UsedSpaceOf::::get(netuid, &who).unwrap_or_default(); - let tempo_length = T::TempoInterface::get_tempo_for_netuid(netuid); + let cur_block_u64 = cur_block.saturated_into::(); + let current_epoch = T::TempoInterface::get_epoch_index(netuid, cur_block_u64); - if cur_block.saturating_sub(usage.last_reset_block) >= tempo_length.into() { - usage.last_reset_block = cur_block; + if usage.last_epoch != current_epoch { + usage.last_epoch = current_epoch; usage.used_space = 0; } diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 88885a073d..8b99faf069 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -106,8 +106,8 @@ impl pallet_commitments::Config for Test { pub struct MockTempoInterface; impl pallet_commitments::GetTempoInterface for MockTempoInterface { - fn get_tempo_for_netuid(_netuid: u16) -> u16 { - 360 + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 9de87a8b58..00a661a61f 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -950,6 +950,8 @@ fn tempo_based_space_limit_resets_after_tempo() { let netuid = 2; let who = 101; + //TODO SPIIGOT: make this line work + //pallet_subtensor::Tempo::::insert(netuid, 360); MaxSpace::::set(40); 
System::::set_block_number(1); @@ -992,7 +994,7 @@ fn tempo_based_space_limit_resets_after_tempo() { Error::::SpaceLimitExceeded ); - System::::set_block_number(400); + System::::set_block_number(363); assert_ok!(Pallet::::set_commitment( RuntimeOrigin::signed(who), diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index e90b8ade3f..0f1d2302a5 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -359,11 +359,11 @@ pub struct RevealedData, BlockNumber> { } /// Tracks how much “space” each (netuid, who) has used within the current RateLimit block-window. -#[freeze_struct("c73c7815f7c51556")] +#[freeze_struct("1f23fb50f96326e4")] #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, TypeInfo)] -pub struct UsageTracker { - /// Last Reset block - pub last_reset_block: BlockNumber, +pub struct UsageTracker { + /// Last epoch block + pub last_epoch: u64, /// Space used pub used_space: u64, } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index db151817d3..8434e2faf1 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -965,14 +965,14 @@ impl pallet_commitments::Config for Runtime { pub struct TempoInterface; impl pallet_commitments::GetTempoInterface for TempoInterface { - fn get_tempo_for_netuid(_netuid: u16) -> u16 { - 360 + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) } } impl pallet_commitments::GetTempoInterface for Runtime { - fn get_tempo_for_netuid(netuid: u16) -> u16 { - pallet_subtensor::Tempo::::get(netuid) + fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { + SubtensorModule::get_epoch_index(netuid, cur_block) } } From c1015daf3dc38d6a2e8519023eaaacff56a8020d Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 14 Mar 2025 13:06:40 -0700 Subject: [PATCH 027/121] fix mock --- pallets/commitments/src/mock.rs | 7 ++++++- pallets/commitments/src/tests.rs | 4 +--- 2 
files changed, 7 insertions(+), 4 deletions(-) diff --git a/pallets/commitments/src/mock.rs b/pallets/commitments/src/mock.rs index 8b99faf069..cc2482ff88 100644 --- a/pallets/commitments/src/mock.rs +++ b/pallets/commitments/src/mock.rs @@ -107,7 +107,12 @@ impl pallet_commitments::Config for Test { pub struct MockTempoInterface; impl pallet_commitments::GetTempoInterface for MockTempoInterface { fn get_epoch_index(netuid: u16, cur_block: u64) -> u64 { - SubtensorModule::get_epoch_index(netuid, cur_block) + let tempo = 360; // TODO: configure SubtensorModule in this mock + let tempo_plus_one: u64 = tempo.saturating_add(1); + let netuid_plus_one: u64 = (netuid as u64).saturating_add(1); + let block_with_offset: u64 = cur_block.saturating_add(netuid_plus_one); + + block_with_offset.checked_div(tempo_plus_one).unwrap_or(0) } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 00a661a61f..e6bf38c445 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -950,8 +950,6 @@ fn tempo_based_space_limit_resets_after_tempo() { let netuid = 2; let who = 101; - //TODO SPIIGOT: make this line work - //pallet_subtensor::Tempo::::insert(netuid, 360); MaxSpace::::set(40); System::::set_block_number(1); @@ -994,7 +992,7 @@ fn tempo_based_space_limit_resets_after_tempo() { Error::::SpaceLimitExceeded ); - System::::set_block_number(363); + System::::set_block_number(360); assert_ok!(Pallet::::set_commitment( RuntimeOrigin::signed(who), From bd33f8c9203c3b04d5f807be7eba6d6f3ed43bfc Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 18 Mar 2025 17:07:49 +0100 Subject: [PATCH 028/121] Add burn/recycle extrinsics --- pallets/subtensor/src/macros/dispatches.rs | 63 ++++++ pallets/subtensor/src/macros/errors.rs | 2 + pallets/subtensor/src/macros/events.rs | 12 ++ pallets/subtensor/src/tests/mod.rs | 1 + pallets/subtensor/src/tests/recycle.rs | 211 +++++++++++++++++++++ pallets/subtensor/src/utils/mod.rs | 1 + 
pallets/subtensor/src/utils/recycle.rs | 118 ++++++++++++ 7 files changed, 408 insertions(+) create mode 100644 pallets/subtensor/src/tests/recycle.rs create mode 100644 pallets/subtensor/src/utils/recycle.rs diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index bcd2bb33f5..387574da93 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1909,5 +1909,68 @@ mod dispatches { Ok(()) } + + /// Recycles tokens from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of tokens to recycle + /// * `netuid` - The subnet ID from which to reduce AlphaOut + /// + /// # Errors + /// Returns an error if: + /// * The transaction is not signed by the correct coldkey. + /// * The subnet with `netuid` does not exist. + /// * The coldkey does not own the hotkey. + /// * The coldkey does not have enough balance. + /// + /// # Events + /// Emits a `TokensRecycled` event on success. + #[pallet::call_index(101)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 2)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn recycle( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_recycle(origin, hotkey, amount, netuid) + } + + /// Burns tokens from a cold/hot key pair without reducing AlphaOut + /// + /// # Arguments + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of tokens to burn + /// * `netuid` - The subnet ID + /// + /// # Errors + /// Returns an error if: + /// * The transaction is not signed by the correct coldkey. + /// * The coldkey does not own the hotkey. 
+ /// * The coldkey does not have enough balance. + /// + /// # Events + /// Emits a `TokensBurned` event on success. + #[pallet::call_index(102)] + #[pallet::weight(( + Weight::from_parts(2_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn burn( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + Self::do_burn(origin, hotkey, amount, netuid) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 1f189cd2f6..609f87b641 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -195,5 +195,7 @@ mod errors { ActivityCutoffTooLow, /// Call is disabled CallDisabled, + /// Not enough AlphaOut on the subnet to recycle + NotEnoughAlphaOutToRecycle, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 834aa901fa..cabd0d14fd 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -275,5 +275,17 @@ mod events { /// Parameters: /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), + + /// Tokens have been recycled, reducing AlphaOut on a subnet. + /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id, alpha_recycled) + TokensRecycled(T::AccountId, T::AccountId, u64, u16, u64), + + /// Tokens have been burned without reducing AlphaOut. 
+ /// + /// Parameters: + /// (coldkey, hotkey, amount, subnet_id) + TokensBurned(T::AccountId, T::AccountId, u64, u16), } } diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 6865c9fa49..577c5c7ac7 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -11,6 +11,7 @@ mod mock; mod move_stake; mod networks; mod neuron_info; +mod recycle; mod registration; mod senate; mod serving; diff --git a/pallets/subtensor/src/tests/recycle.rs b/pallets/subtensor/src/tests/recycle.rs new file mode 100644 index 0000000000..eeb0e7276b --- /dev/null +++ b/pallets/subtensor/src/tests/recycle.rs @@ -0,0 +1,211 @@ +use frame_support::{assert_noop, assert_ok, traits::Currency}; +use sp_core::U256; + +use super::mock::*; +use crate::*; + +#[test] +fn test_recycle_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_balance = Balances::free_balance(coldkey); + let initial_alpha = SubnetAlphaOut::::get(netuid); + let initial_net_tao = SubnetTAO::::get(netuid); + // preset total issuance + TotalIssuance::::put(initial_balance + stake); + let initial_issuance = TotalIssuance::::get(); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + 
assert_ok!(SubtensorModule::recycle( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(Balances::free_balance(coldkey) < initial_balance); + assert!(SubnetAlphaOut::::get(netuid) < initial_alpha); + assert!(SubnetTAO::::get(netuid) < initial_net_tao); + assert!(TotalIssuance::::get() < initial_issuance); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::TokensRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_burn_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + let initial_balance = Balances::free_balance(&coldkey); + let initial_alpha = SubnetAlphaOut::::get(netuid); + let initial_net_tao = SubnetTAO::::get(netuid); + // preset total issuance + TotalIssuance::::put(initial_balance + stake); + let initial_issuance = TotalIssuance::::get(); + + let burn = stake / 2; + assert_ok!(SubtensorModule::burn( + RuntimeOrigin::signed(coldkey), + hotkey, + burn, + netuid + )); + + assert!(Balances::free_balance(coldkey) < initial_balance); + assert!(SubnetAlphaOut::::get(netuid) == initial_alpha); + assert!(SubnetTAO::::get(netuid) < initial_net_tao); + assert!(TotalIssuance::::get() < initial_issuance); + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::TokensBurned(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_errors() { + new_test_ext(1).execute_with(|| { 
+ let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_coldkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::recycle( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::recycle( + RuntimeOrigin::signed(wrong_coldkey), + hotkey, + 100_000, + netuid + ), + Error::::NonAssociatedColdKey + ); + + assert_noop!( + SubtensorModule::recycle( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + + // Set AlphaOut to 0 to cause InsufficientLiquidity + SubnetAlphaIn::::insert(netuid, 1_000_000); // Ensure there's enough alphaIn + SubnetAlphaOut::::insert(netuid, 0); // But no alphaOut + + assert_noop!( + SubtensorModule::recycle(RuntimeOrigin::signed(coldkey), hotkey, 100_000, netuid), + Error::::InsufficientLiquidity + ); + }); +} + +#[test] +fn test_burn_errors() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let wrong_coldkey = U256::from(3); + + let subnet_owner_coldkey = U256::from(1001); + let subnet_owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + 
register_ok_neuron(netuid, hotkey, coldkey, 0); + + let stake_amount = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); + + assert_noop!( + SubtensorModule::burn( + RuntimeOrigin::signed(wrong_coldkey), + hotkey, + 100_000, + netuid + ), + Error::::NonAssociatedColdKey + ); + + assert_noop!( + SubtensorModule::burn( + RuntimeOrigin::signed(coldkey), + hotkey, + 10_000_000_000, // too much + netuid + ), + Error::::NotEnoughStakeToWithdraw + ); + }); +} diff --git a/pallets/subtensor/src/utils/mod.rs b/pallets/subtensor/src/utils/mod.rs index 909ad89593..9d19ca8386 100644 --- a/pallets/subtensor/src/utils/mod.rs +++ b/pallets/subtensor/src/utils/mod.rs @@ -2,5 +2,6 @@ use super::*; pub mod identity; pub mod misc; pub mod rate_limiting; +pub mod recycle; #[cfg(feature = "try-runtime")] pub mod try_state; diff --git a/pallets/subtensor/src/utils/recycle.rs b/pallets/subtensor/src/utils/recycle.rs new file mode 100644 index 0000000000..37aedcee43 --- /dev/null +++ b/pallets/subtensor/src/utils/recycle.rs @@ -0,0 +1,118 @@ +use super::*; +use crate::{Error, system::ensure_signed}; + +impl Pallet { + /// Recycles tokens from a cold/hot key pair, reducing AlphaOut on a subnet + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of tokens to recycle + /// * `netuid` - The subnet ID from which to reduce AlphaOut + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub fn do_recycle( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + ensure!( + Self::coldkey_owns_hotkey(&coldkey, &hotkey), + Error::::NonAssociatedColdKey + ); + + ensure!( + Self::can_remove_balance_from_coldkey_account(&coldkey, amount), + 
Error::::NotEnoughStakeToWithdraw + ); + + // convert TAO to alpha equivalent + let Some(alpha_amount) = Self::sim_swap_tao_for_alpha(netuid, amount) else { + return Err(Error::::InsufficientLiquidity.into()); + }; + + ensure!( + SubnetAlphaOut::::get(netuid) >= alpha_amount, + Error::::NotEnoughAlphaOutToRecycle + ); + + // remove the amount from the coldkey account + let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, amount)?; + + // update related storages + SubnetAlphaOut::::mutate(netuid, |total| { + *total = total.saturating_sub(alpha_amount); + }); + SubnetTAO::::mutate(netuid, |v| *v = v.saturating_sub(actual_burn_amount)); + TotalIssuance::::put(TotalIssuance::::get().saturating_sub(actual_burn_amount)); + + Self::increase_rao_recycled(netuid, actual_burn_amount); + + // Deposit event + Self::deposit_event(Event::TokensRecycled( + coldkey, + hotkey, + actual_burn_amount, + netuid, + alpha_amount, + )); + + Ok(()) + } + + /// Burns tokens from a cold/hot key pair without reducing AlphaOut + /// + /// # Arguments + /// + /// * `origin` - The origin of the call (must be signed by the coldkey) + /// * `hotkey` - The hotkey account + /// * `amount` - The amount of tokens to burn + /// * `netuid` - The subnet ID + /// + /// # Returns + /// + /// * `DispatchResult` - Success or error + pub fn do_burn( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + amount: u64, + netuid: u16, + ) -> DispatchResult { + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + ensure!( + Self::coldkey_owns_hotkey(&coldkey, &hotkey), + Error::::NonAssociatedColdKey + ); + + ensure!( + Self::can_remove_balance_from_coldkey_account(&coldkey, amount), + Error::::NotEnoughStakeToWithdraw + ); + + let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, amount)?; + + SubnetTAO::::mutate(netuid, |v| *v = v.saturating_sub(actual_burn_amount)); + 
TotalIssuance::::put(TotalIssuance::::get().saturating_sub(actual_burn_amount)); + + Self::deposit_event(Event::TokensBurned(coldkey, hotkey, actual_burn_amount, netuid)); + + Ok(()) + } +} From a9e6f8300a8ab02cce91a23f152a279c2050da42 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 18 Mar 2025 18:55:32 +0100 Subject: [PATCH 029/121] Rewrite recycle/burn alpha --- pallets/subtensor/src/macros/dispatches.rs | 31 ++---- pallets/subtensor/src/macros/events.rs | 10 +- pallets/subtensor/src/staking/mod.rs | 1 + .../recycle.rs => staking/recycle_alpha.rs} | 56 ++++------ pallets/subtensor/src/tests/mod.rs | 2 +- .../tests/{recycle.rs => recycle_alpha.rs} | 100 +++++++++++------- pallets/subtensor/src/utils/mod.rs | 1 - 7 files changed, 98 insertions(+), 103 deletions(-) rename pallets/subtensor/src/{utils/recycle.rs => staking/recycle_alpha.rs} (50%) rename pallets/subtensor/src/tests/{recycle.rs => recycle_alpha.rs} (65%) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 387574da93..14813c9875 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1910,20 +1910,13 @@ mod dispatches { Ok(()) } - /// Recycles tokens from a cold/hot key pair, reducing AlphaOut on a subnet + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet /// /// # Arguments /// * `origin` - The origin of the call (must be signed by the coldkey) /// * `hotkey` - The hotkey account - /// * `amount` - The amount of tokens to recycle - /// * `netuid` - The subnet ID from which to reduce AlphaOut - /// - /// # Errors - /// Returns an error if: - /// * The transaction is not signed by the correct coldkey. - /// * The subnet with `netuid` does not exist. - /// * The coldkey does not own the hotkey. - /// * The coldkey does not have enough balance. 
+ /// * `amount` - The amount of alpha to recycle + /// * `netuid` - The subnet ID /// /// # Events /// Emits a `TokensRecycled` event on success. @@ -1933,29 +1926,23 @@ mod dispatches { DispatchClass::Operational, Pays::Yes ))] - pub fn recycle( + pub fn recycle_alpha( origin: T::RuntimeOrigin, hotkey: T::AccountId, amount: u64, netuid: u16, ) -> DispatchResult { - Self::do_recycle(origin, hotkey, amount, netuid) + Self::do_recycle_alpha(origin, hotkey, amount, netuid) } - /// Burns tokens from a cold/hot key pair without reducing AlphaOut + /// Burns alpha from a cold/hot key pair without reducing `AlphaOut` /// /// # Arguments /// * `origin` - The origin of the call (must be signed by the coldkey) /// * `hotkey` - The hotkey account - /// * `amount` - The amount of tokens to burn + /// * `amount` - The amount of alpha to burn /// * `netuid` - The subnet ID /// - /// # Errors - /// Returns an error if: - /// * The transaction is not signed by the correct coldkey. - /// * The coldkey does not own the hotkey. - /// * The coldkey does not have enough balance. - /// /// # Events /// Emits a `TokensBurned` event on success. #[pallet::call_index(102)] @@ -1964,13 +1951,13 @@ mod dispatches { DispatchClass::Operational, Pays::Yes ))] - pub fn burn( + pub fn burn_alpha( origin: T::RuntimeOrigin, hotkey: T::AccountId, amount: u64, netuid: u16, ) -> DispatchResult { - Self::do_burn(origin, hotkey, amount, netuid) + Self::do_burn_alpha(origin, hotkey, amount, netuid) } } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index cabd0d14fd..1c87d37ed1 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -276,16 +276,16 @@ mod events { /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), - /// Tokens have been recycled, reducing AlphaOut on a subnet. + /// Alpha has been recycled, reducing AlphaOut on a subnet. 
/// /// Parameters: - /// (coldkey, hotkey, amount, subnet_id, alpha_recycled) - TokensRecycled(T::AccountId, T::AccountId, u64, u16, u64), + /// (coldkey, hotkey, amount, subnet_id) + AlphaRecycled(T::AccountId, T::AccountId, u64, u16), - /// Tokens have been burned without reducing AlphaOut. + /// Alpha have been burned without reducing AlphaOut. /// /// Parameters: /// (coldkey, hotkey, amount, subnet_id) - TokensBurned(T::AccountId, T::AccountId, u64, u16), + AlphaBurned(T::AccountId, T::AccountId, u64, u16), } } diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs index ecf8fb8815..6916dda0fa 100644 --- a/pallets/subtensor/src/staking/mod.rs +++ b/pallets/subtensor/src/staking/mod.rs @@ -6,5 +6,6 @@ pub mod helpers; pub mod increase_take; pub mod move_stake; pub mod remove_stake; +pub mod recycle_alpha; pub mod set_children; pub mod stake_utils; diff --git a/pallets/subtensor/src/utils/recycle.rs b/pallets/subtensor/src/staking/recycle_alpha.rs similarity index 50% rename from pallets/subtensor/src/utils/recycle.rs rename to pallets/subtensor/src/staking/recycle_alpha.rs index 37aedcee43..109d26c850 100644 --- a/pallets/subtensor/src/utils/recycle.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -2,19 +2,19 @@ use super::*; use crate::{Error, system::ensure_signed}; impl Pallet { - /// Recycles tokens from a cold/hot key pair, reducing AlphaOut on a subnet + /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet /// /// # Arguments /// /// * `origin` - The origin of the call (must be signed by the coldkey) /// * `hotkey` - The hotkey account - /// * `amount` - The amount of tokens to recycle + /// * `amount` - The amount of alpha to recycle /// * `netuid` - The subnet ID from which to reduce AlphaOut /// /// # Returns /// /// * `DispatchResult` - Success or error - pub fn do_recycle( + pub(crate) fn do_recycle_alpha( origin: T::RuntimeOrigin, hotkey: T::AccountId, amount: u64, @@ -33,57 +33,38 
@@ impl Pallet { ); ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, amount), + TotalHotkeyAlpha::::get(&hotkey, netuid) >= amount, Error::::NotEnoughStakeToWithdraw ); - // convert TAO to alpha equivalent - let Some(alpha_amount) = Self::sim_swap_tao_for_alpha(netuid, amount) else { - return Err(Error::::InsufficientLiquidity.into()); - }; - ensure!( - SubnetAlphaOut::::get(netuid) >= alpha_amount, - Error::::NotEnoughAlphaOutToRecycle + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity ); - // remove the amount from the coldkey account - let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, amount)?; - - // update related storages + TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| *v = v.saturating_sub(amount)); SubnetAlphaOut::::mutate(netuid, |total| { - *total = total.saturating_sub(alpha_amount); + *total = total.saturating_sub(amount); }); - SubnetTAO::::mutate(netuid, |v| *v = v.saturating_sub(actual_burn_amount)); - TotalIssuance::::put(TotalIssuance::::get().saturating_sub(actual_burn_amount)); - Self::increase_rao_recycled(netuid, actual_burn_amount); - - // Deposit event - Self::deposit_event(Event::TokensRecycled( - coldkey, - hotkey, - actual_burn_amount, - netuid, - alpha_amount, - )); + Self::deposit_event(Event::AlphaRecycled(coldkey, hotkey, amount, netuid)); Ok(()) } - /// Burns tokens from a cold/hot key pair without reducing AlphaOut + /// Burns alpha from a cold/hot key pair without reducing AlphaOut /// /// # Arguments /// /// * `origin` - The origin of the call (must be signed by the coldkey) /// * `hotkey` - The hotkey account - /// * `amount` - The amount of tokens to burn + /// * `amount` - The "up to" amount of alpha to burn /// * `netuid` - The subnet ID /// /// # Returns /// /// * `DispatchResult` - Success or error - pub fn do_burn( + pub(crate) fn do_burn_alpha( origin: T::RuntimeOrigin, hotkey: T::AccountId, amount: u64, @@ -102,16 +83,19 @@ impl Pallet { ); ensure!( - 
Self::can_remove_balance_from_coldkey_account(&coldkey, amount), + TotalHotkeyAlpha::::get(&hotkey, netuid) >= amount, Error::::NotEnoughStakeToWithdraw ); - let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, amount)?; + ensure!( + SubnetAlphaOut::::get(netuid) >= amount, + Error::::InsufficientLiquidity + ); - SubnetTAO::::mutate(netuid, |v| *v = v.saturating_sub(actual_burn_amount)); - TotalIssuance::::put(TotalIssuance::::get().saturating_sub(actual_burn_amount)); + TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| *v = v.saturating_sub(amount)); - Self::deposit_event(Event::TokensBurned(coldkey, hotkey, actual_burn_amount, netuid)); + // Deposit event + Self::deposit_event(Event::AlphaBurned(coldkey, hotkey, amount, netuid)); Ok(()) } diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 577c5c7ac7..8a9fa6b103 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -11,7 +11,7 @@ mod mock; mod move_stake; mod networks; mod neuron_info; -mod recycle; +mod recycle_alpha; mod registration; mod senate; mod serving; diff --git a/pallets/subtensor/src/tests/recycle.rs b/pallets/subtensor/src/tests/recycle_alpha.rs similarity index 65% rename from pallets/subtensor/src/tests/recycle.rs rename to pallets/subtensor/src/tests/recycle_alpha.rs index eeb0e7276b..894a8887a4 100644 --- a/pallets/subtensor/src/tests/recycle.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -28,33 +28,27 @@ fn test_recycle_success() { increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); // get initial total issuance and alpha out - let initial_balance = Balances::free_balance(coldkey); - let initial_alpha = SubnetAlphaOut::::get(netuid); - let initial_net_tao = SubnetTAO::::get(netuid); - // preset total issuance - TotalIssuance::::put(initial_balance + stake); - let initial_issuance = TotalIssuance::::get(); + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, 
netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); // amount to recycle let recycle_amount = stake / 2; // recycle - assert_ok!(SubtensorModule::recycle( + assert_ok!(SubtensorModule::recycle_alpha( RuntimeOrigin::signed(coldkey), hotkey, recycle_amount, netuid )); - assert!(Balances::free_balance(coldkey) < initial_balance); - assert!(SubnetAlphaOut::::get(netuid) < initial_alpha); - assert!(SubnetTAO::::get(netuid) < initial_net_tao); - assert!(TotalIssuance::::get() < initial_issuance); + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); assert!(System::events().iter().any(|e| { matches!( &e.event, - RuntimeEvent::SubtensorModule(Event::TokensRecycled(..)) + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) ) })); }); @@ -66,42 +60,45 @@ fn test_burn_success() { let coldkey = U256::from(1); let hotkey = U256::from(2); - let subnet_owner_coldkey = U256::from(1001); - let subnet_owner_hotkey = U256::from(1002); - let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); let initial_balance = 1_000_000_000; Balances::make_free_balance_be(&coldkey, initial_balance); + // associate coldkey and hotkey SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); register_ok_neuron(netuid, hotkey, coldkey, 0); + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it let stake = 200_000; increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); - let initial_balance = Balances::free_balance(&coldkey); - let initial_alpha = SubnetAlphaOut::::get(netuid); - let initial_net_tao = SubnetTAO::::get(netuid); - // preset total issuance - TotalIssuance::::put(initial_balance + stake); - let initial_issuance = TotalIssuance::::get(); + // 
get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); - let burn = stake / 2; - assert_ok!(SubtensorModule::burn( + // amount to recycle + let burn_amount = stake / 2; + + // burn + assert_ok!(SubtensorModule::burn_alpha( RuntimeOrigin::signed(coldkey), hotkey, - burn, + burn_amount, netuid )); - assert!(Balances::free_balance(coldkey) < initial_balance); - assert!(SubnetAlphaOut::::get(netuid) == initial_alpha); - assert!(SubnetTAO::::get(netuid) < initial_net_tao); - assert!(TotalIssuance::::get() < initial_issuance); + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!(System::events().iter().any(|e| { matches!( &e.event, - RuntimeEvent::SubtensorModule(Event::TokensBurned(..)) + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) ) })); }); @@ -128,7 +125,7 @@ fn test_recycle_errors() { increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); assert_noop!( - SubtensorModule::recycle( + SubtensorModule::recycle_alpha( RuntimeOrigin::signed(coldkey), hotkey, 100_000, @@ -138,7 +135,7 @@ fn test_recycle_errors() { ); assert_noop!( - SubtensorModule::recycle( + SubtensorModule::recycle_alpha( RuntimeOrigin::signed(wrong_coldkey), hotkey, 100_000, @@ -148,7 +145,7 @@ fn test_recycle_errors() { ); assert_noop!( - SubtensorModule::recycle( + SubtensorModule::recycle_alpha( RuntimeOrigin::signed(coldkey), hotkey, 10_000_000_000, // too much @@ -157,12 +154,16 @@ fn test_recycle_errors() { Error::::NotEnoughStakeToWithdraw ); - // Set AlphaOut to 0 to cause InsufficientLiquidity - SubnetAlphaIn::::insert(netuid, 1_000_000); // Ensure there's enough alphaIn - SubnetAlphaOut::::insert(netuid, 0); // But no alphaOut + // make it pass the hotkey alpha check + TotalHotkeyAlpha::::set(hotkey, netuid, SubnetAlphaOut::::get(netuid) + 1); assert_noop!( - 
SubtensorModule::recycle(RuntimeOrigin::signed(coldkey), hotkey, 100_000, netuid), + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), Error::::InsufficientLiquidity ); }); @@ -189,7 +190,17 @@ fn test_burn_errors() { increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_amount, netuid); assert_noop!( - SubtensorModule::burn( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + 99 // non-existent subnet + ), + Error::::SubNetworkDoesNotExist + ); + + assert_noop!( + SubtensorModule::burn_alpha( RuntimeOrigin::signed(wrong_coldkey), hotkey, 100_000, @@ -199,7 +210,7 @@ fn test_burn_errors() { ); assert_noop!( - SubtensorModule::burn( + SubtensorModule::burn_alpha( RuntimeOrigin::signed(coldkey), hotkey, 10_000_000_000, // too much @@ -207,5 +218,18 @@ fn test_burn_errors() { ), Error::::NotEnoughStakeToWithdraw ); + + // make it pass the hotkey alpha check + TotalHotkeyAlpha::::set(hotkey, netuid, SubnetAlphaOut::::get(netuid) + 1); + + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + SubnetAlphaOut::::get(netuid) + 1, + netuid + ), + Error::::InsufficientLiquidity + ); }); } diff --git a/pallets/subtensor/src/utils/mod.rs b/pallets/subtensor/src/utils/mod.rs index 9d19ca8386..909ad89593 100644 --- a/pallets/subtensor/src/utils/mod.rs +++ b/pallets/subtensor/src/utils/mod.rs @@ -2,6 +2,5 @@ use super::*; pub mod identity; pub mod misc; pub mod rate_limiting; -pub mod recycle; #[cfg(feature = "try-runtime")] pub mod try_state; From 114938a5e6271135b62aa67dc74c519e52e535ff Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 19 Mar 2025 13:31:36 +0100 Subject: [PATCH 030/121] Add benchmarks for burn/recycle alpha --- pallets/subtensor/src/benchmarks.rs | 63 +++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/pallets/subtensor/src/benchmarks.rs 
b/pallets/subtensor/src/benchmarks.rs index 30d1f39e11..47dd7a5bc5 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -594,5 +594,68 @@ batch_reveal_weights { version_keys ) +benchmark_recycle_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: recycle_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + +benchmark_burn_alpha { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid = 1; + let tempo = 1; + let seed = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 
1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + + // Add alpha to the hotkey + let alpha_amount: u64 = 1000000; + TotalHotkeyAlpha::::insert(&hotkey, netuid, alpha_amount); + SubnetAlphaOut::::insert(netuid, alpha_amount * 2); + + // Verify the alpha has been added + assert_eq!(TotalHotkeyAlpha::::get(&hotkey, netuid), alpha_amount); + +}: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) } From 1aba3f7a8dd79f980685719cce543fd294a0500c Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 19 Mar 2025 13:56:47 +0100 Subject: [PATCH 031/121] Reformat --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- pallets/subtensor/src/staking/mod.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 14813c9875..9f61aca60c 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1941,7 +1941,7 @@ mod dispatches { /// * `origin` - The origin of the call (must be signed by the coldkey) /// * `hotkey` - The hotkey account /// * `amount` - The amount of alpha to burn - /// * `netuid` - The subnet ID + /// * `netuid` - The subnet ID /// /// # Events /// Emits a `TokensBurned` event on success. 
@@ -1955,7 +1955,7 @@ mod dispatches { origin: T::RuntimeOrigin, hotkey: T::AccountId, amount: u64, - netuid: u16, + netuid: u16, ) -> DispatchResult { Self::do_burn_alpha(origin, hotkey, amount, netuid) } diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs index 6916dda0fa..570658631a 100644 --- a/pallets/subtensor/src/staking/mod.rs +++ b/pallets/subtensor/src/staking/mod.rs @@ -5,7 +5,7 @@ pub mod decrease_take; pub mod helpers; pub mod increase_take; pub mod move_stake; -pub mod remove_stake; pub mod recycle_alpha; +pub mod remove_stake; pub mod set_children; pub mod stake_utils; From 2fb469fbb022b279a573180f31a421c174387cdb Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 19 Mar 2025 14:45:47 +0100 Subject: [PATCH 032/121] Update spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2bd96a1e1a..0f362ada54 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -205,7 +205,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 249, + spec_version: 250, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From dbd4a3a97f0e3e6c31ea025fa8fae223d8c3b373 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 19 Mar 2025 14:59:08 +0100 Subject: [PATCH 033/121] Update spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 25799a75c1..94e82ca8f3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -205,7 +205,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 252, + spec_version: 253, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From cc39b6e747edab4dbbc128c2bdf650234638d5b5 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 10:52:23 +0800 Subject: [PATCH 034/121] feature start call --- .../subtensor/src/coinbase/run_coinbase.rs | 3 +- pallets/subtensor/src/lib.rs | 6 ++- pallets/subtensor/src/macros/config.rs | 3 ++ pallets/subtensor/src/macros/dispatches.rs | 14 +++++ pallets/subtensor/src/macros/errors.rs | 4 ++ pallets/subtensor/src/macros/events.rs | 6 +++ pallets/subtensor/src/macros/hooks.rs | 4 +- .../migrate_set_last_emission_block_number.rs | 53 +++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/subnets/subnet.rs | 34 ++++++++++++ pallets/subtensor/src/tests/migration.rs | 22 ++++++++ pallets/subtensor/src/tests/mock.rs | 2 + runtime/src/lib.rs | 2 + 13 files changed, 151 insertions(+), 3 deletions(-) create mode 100644 pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7836423868..92d7e3722a 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -37,10 +37,11 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::debug!("Current block: {:?}", current_block); - // --- 1. Get all netuids (filter out root.) + // --- 1. 
Get all netuids (filter out root and new subnet) let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) + .filter(|netuid| LastEmissionBlockNumber::::get(*netuid).is_some()) .collect(); log::debug!("All subnet netuids: {:?}", subnets); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 03438aa637..3df312f92b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1125,7 +1125,11 @@ pub mod pallet { /// ============================ /// ==== Subnet Parameters ===== /// ============================ - #[pallet::storage] // --- MAP ( netuid ) --> subnet mechanism + /// --- MAP ( netuid ) --> block number of last emission + #[pallet::storage] + pub type LastEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; + /// --- MAP ( netuid ) --> subnet mechanism + #[pallet::storage] pub type SubnetMechanism = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultZeroU16>; #[pallet::storage] diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index af448c8771..889c7e5c5f 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -210,5 +210,8 @@ mod config { /// Initial EMA price halving period #[pallet::constant] type InitialEmaPriceHalvingPeriod: Get; + /// Block number for a new subnet accept the start call extrinsic. + #[pallet::constant] + type DurationOfStartCall: Get; } } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index bcd2bb33f5..1635a78bb6 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1909,5 +1909,19 @@ mod dispatches { Ok(()) } + + /// Initiates a call on a subnet. 
+ /// + #[pallet::call_index(92)] + #[pallet::weight(( + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + let _ = Self::do_start_call(origin, netuid); + + Ok(()) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 1f189cd2f6..251669c860 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -195,5 +195,9 @@ mod errors { ActivityCutoffTooLow, /// Call is disabled CallDisabled, + /// LastEmissionBlockNumber is already set. + LastEmissionBlockNumberAlreadySet, + /// need wait for more blocks to accept the start call extrinsic. + NeedMoreBlocksToStarCall, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 834aa901fa..722ecd4205 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -275,5 +275,11 @@ mod events { /// Parameters: /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), + /// LastEmissionBlockNumber is set via start call extrinsic + /// + /// Parameters: + /// netuid + /// block number + LastEmissionBlockNumberSet(u16, u64), } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index df9dffabca..68cd2a7623 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -81,7 +81,9 @@ mod hooks { // Remove Stake map entries .saturating_add(migrations::migrate_remove_stake_map::migrate_remove_stake_map::()) // Remove unused maps entries - .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()); + .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()) + // Set last emission block number for all existed subnets 
before start call feature applied + .saturating_add(migrations::migrate_set_last_emission_block_number::migrate_set_last_emission_block_number::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs new file mode 100644 index 0000000000..bc1f7b9abd --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs @@ -0,0 +1,53 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_set_last_emission_block_number() -> Weight { + let migration_name = b"migrate_set_last_emission_block_number".to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{:?}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Set the last emission block for all subnets except root + // ------------------------------ + let netuids = Pallet::::get_all_subnet_netuids(); + let current_block_number = Pallet::::get_current_block_as_u64(); + for netuid in netuids.iter() { + if *netuid != 0 { + LastEmissionBlockNumber::::insert(netuid, current_block_number); + } + } + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().reads(2)); + + if netuids.is_empty() { + weight = weight.saturating_add(T::DbWeight::get().writes(1_u64)); + } else { + weight = weight.saturating_add(T::DbWeight::get().writes(netuids.len() as u64)); + } + + log::info!( + "Migration '{:?}' completed successfully.", + 
String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 6af6ad2a56..cada8a9997 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -11,6 +11,7 @@ pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_remove_stake_map; pub mod migrate_remove_unused_maps_and_values; +pub mod migrate_set_last_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; pub mod migrate_stake_threshold; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index bf1806da14..f7f26e97e1 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -320,4 +320,38 @@ impl Pallet { ); } } + + /// Initiates a call on a subnet. + /// + pub fn do_start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + Self::ensure_subnet_owner(origin, netuid)?; + ensure!( + LastEmissionBlockNumber::::get(netuid).is_none(), + Error::::LastEmissionBlockNumberAlreadySet + ); + + let registration_block_number = NetworkRegisteredAt::::get(netuid); + let current_block_number = Self::get_current_block_as_u64(); + + ensure!( + current_block_number + > registration_block_number.saturating_add(T::DurationOfStartCall::get()), + Error::::NeedMoreBlocksToStarCall + ); + + LastEmissionBlockNumber::::insert(netuid, current_block_number); + Self::deposit_event(Event::LastEmissionBlockNumberSet( + netuid, + current_block_number, + )); + Ok(()) + } + + pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { + LastEmissionBlockNumber::::get(netuid).is_some() + } } diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index c0cbe1b81a..549bc329dd 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ 
b/pallets/subtensor/src/tests/migration.rs @@ -11,6 +11,7 @@ use frame_support::{ traits::{StorageInstance, StoredMap}, weights::Weight, }; + use frame_system::Config; use sp_core::{H256, U256, crypto::Ss58Codec}; use sp_io::hashing::twox_128; @@ -416,3 +417,24 @@ fn test_migrate_subnet_volume() { assert_eq!(new_value, Some(old_value as u128)); }); } + +#[test] +fn test_migrate_set_last_emission_block_number() { + new_test_ext(1).execute_with(|| { + let netuids: [u16; 3] = [1, 2, 3]; + let block_number = 100; + for netuid in netuids.iter() { + add_network(*netuid, 1, 0); + } + run_to_block(block_number); + let weight = crate::migrations::migrate_set_last_emission_block_number::migrate_set_last_emission_block_number::(); + + let expected_weight: Weight = ::DbWeight::get().reads(3) + ::DbWeight::get().writes(netuids.len() as u64); + assert_eq!(weight, expected_weight); + + assert_eq!(LastEmissionBlockNumber::::get(0), None); + for netuid in netuids.iter() { + assert_eq!(LastEmissionBlockNumber::::get(netuid), Some(block_number)); + } +}); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 0d979a6126..8afffb2a48 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -185,6 +185,7 @@ parameter_types! { pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days pub const InitialTaoWeight: u64 = 0; // 100% global weight. 
pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // Default as 7 days } // Configure collective pallet for council @@ -408,6 +409,7 @@ impl crate::Config for Test { type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } pub struct OriginPrivilegeCmp; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 25799a75c1..f9e05b66b3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1018,6 +1018,7 @@ parameter_types! { pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // 7 days } impl pallet_subtensor::Config for Runtime { @@ -1082,6 +1083,7 @@ impl pallet_subtensor::Config for Runtime { type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } use sp_runtime::BoundedVec; From dbce5ac0a496bddb4cf2af9e456443a8fba3dc45 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 12:22:57 +0800 Subject: [PATCH 035/121] commit Cargo.lock --- pallets/admin-utils/src/tests/mock.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index fc0d016198..43a40ec32e 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -135,6 +135,7 @@ parameter_types! 
{ pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days pub const InitialTaoWeight: u64 = u64::MAX/10; // 10% global weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks + pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // 7 days } impl pallet_subtensor::Config for Test { @@ -199,6 +200,7 @@ impl pallet_subtensor::Config for Test { type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; type InitialTaoWeight = InitialTaoWeight; type InitialEmaPriceHalvingPeriod = InitialEmaPriceHalvingPeriod; + type DurationOfStartCall = DurationOfStartCall; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] From 93dcb79fc3e2deab06e1c30c289c6ff2b757e325 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 13:07:33 +0800 Subject: [PATCH 036/121] fix test cases --- pallets/subtensor/src/tests/mock.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 8afffb2a48..297f67c84a 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -664,6 +664,7 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::init_new_network(netuid, tempo); SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); + LastEmissionBlockNumber::::insert(netuid, 0); } #[allow(dead_code)] @@ -678,6 +679,7 @@ pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { )); NetworkRegistrationAllowed::::insert(netuid, true); NetworkPowRegistrationAllowed::::insert(netuid, true); + LastEmissionBlockNumber::::insert(netuid, 0); netuid } From d4d6748be639409dd0432828c5e1e7097db107d8 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 13:10:29 +0800 Subject: [PATCH 037/121] update runtime version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 251d4537ba..8b390aaa59 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -205,7 +205,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 253, + spec_version: 254, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From d6321c0e8d407660e616111357aec81adceb34ef Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 18:24:05 +0800 Subject: [PATCH 038/121] commit Cargo.lock --- .../subtensor/src/coinbase/run_coinbase.rs | 3 +++ pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/macros/errors.rs | 2 +- pallets/subtensor/src/subnets/subnet.rs | 4 ++-- pallets/subtensor/src/tests/mock.rs | 22 +++++++++++++++++++ pallets/subtensor/src/tests/mod.rs | 1 + 6 files changed, 30 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 92d7e3722a..f5292281db 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -245,6 +245,9 @@ impl Pallet { pending_swapped, owner_cut, ); + + // Set last emission block + LastEmissionBlockNumber::::insert(netuid, Self::get_current_block_as_u64()) } else { // Increment BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index ed7507ffc9..51d5dcecf0 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1919,7 +1919,7 @@ mod dispatches { Pays::Yes ))] pub fn start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { - let _ = Self::do_start_call(origin, 
netuid); + Self::do_start_call(origin, netuid)?; Ok(()) } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index c2696fcb8b..f0e40d4207 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -198,7 +198,7 @@ mod errors { /// LastEmissionBlockNumber is already set. LastEmissionBlockNumberAlreadySet, /// need wait for more blocks to accept the start call extrinsic. - NeedMoreBlocksToStarCall, + NeedWaitingMoreBlocksToStarCall, /// Not enough AlphaOut on the subnet to recycle NotEnoughAlphaOutToRecycle, } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index f7f26e97e1..3f6a58371f 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -339,8 +339,8 @@ impl Pallet { ensure!( current_block_number - > registration_block_number.saturating_add(T::DurationOfStartCall::get()), - Error::::NeedMoreBlocksToStarCall + >= registration_block_number.saturating_add(T::DurationOfStartCall::get()), + Error::::NeedWaitingMoreBlocksToStarCall ); LastEmissionBlockNumber::::insert(netuid, current_block_number); diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 297f67c84a..525ab2b58d 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -667,6 +667,13 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { LastEmissionBlockNumber::::insert(netuid, 0); } +#[allow(dead_code)] +pub fn add_network_without_emission_block(netuid: u16, tempo: u16, _modality: u16) { + SubtensorModule::init_new_network(netuid, tempo); + SubtensorModule::set_network_registration_allowed(netuid, true); + SubtensorModule::set_network_pow_registration_allowed(netuid, true); +} + #[allow(dead_code)] pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { let netuid = SubtensorModule::get_next_netuid(); @@ -683,6 +690,21 @@ pub fn 
add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { netuid } +#[allow(dead_code)] +pub fn add_dynamic_network_without_emission_block(hotkey: &U256, coldkey: &U256) -> u16 { + let netuid = SubtensorModule::get_next_netuid(); + let lock_cost = SubtensorModule::get_network_lock_cost(); + SubtensorModule::add_balance_to_coldkey_account(coldkey, lock_cost); + + assert_ok!(SubtensorModule::register_network( + RawOrigin::Signed(*coldkey).into(), + *hotkey + )); + NetworkRegistrationAllowed::::insert(netuid, true); + NetworkPowRegistrationAllowed::::insert(netuid, true); + netuid +} + // Helper function to set up a neuron with stake #[allow(dead_code)] pub fn setup_neuron_with_stake(netuid: u16, hotkey: U256, coldkey: U256, stake: u64) { diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 8a9fa6b103..efd45ddef1 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -17,6 +17,7 @@ mod senate; mod serving; mod staking; mod staking2; +mod subnet; mod swap_coldkey; mod swap_hotkey; mod uids; From fdbbcec9d162fc53b108d009f5e5db3d6570bf76 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 18:24:34 +0800 Subject: [PATCH 039/121] more unit test --- pallets/subtensor/src/tests/subnet.rs | 222 ++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) create mode 100644 pallets/subtensor/src/tests/subnet.rs diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs new file mode 100644 index 0000000000..978c0509dd --- /dev/null +++ b/pallets/subtensor/src/tests/subnet.rs @@ -0,0 +1,222 @@ +use super::mock::*; +use crate::*; +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; + +/*************************** + pub fn do_start_call() tests +*****************************/ + +#[test] +fn test_do_start_call_ok() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let 
coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + LastEmissionBlockNumber::::get(netuid), + Some(block_number) + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_not_existed_subnet() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey_account_id = U256::from(0); + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_do_start_call_fail_not_owner() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let wrong_owner_account_id = U256::from(2); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); 
+ + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + System::set_block_number(System::block_number() + DurationOfStartCall::get()); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(wrong_owner_account_id), + netuid + ), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_do_start_call_fail_with_cannot_start_call_now() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::NeedWaitingMoreBlocksToStarCall + ); + }); +} + +#[test] +fn test_do_start_call_fail_for_set_again() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), 
coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_noop!( + SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + ), + Error::::LastEmissionBlockNumberAlreadySet + ); + }); +} + +#[test] +fn test_do_start_call_ok_with_updated_block_number_after_coinbase() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let coldkey_account_id = U256::from(0); + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + //add network + SubtensorModule::set_burn(netuid, burn_cost); + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + + // Give it some $$$ in his coldkey balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + assert_eq!(SubnetOwner::::get(netuid), coldkey_account_id); + + let block_number = System::block_number() + DurationOfStartCall::get(); + System::set_block_number(block_number); + + assert_ok!(SubtensorModule::start_call( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + assert_eq!( + LastEmissionBlockNumber::::get(netuid), + Some(block_number) + ); + + step_block(tempo); + match LastEmissionBlockNumber::::get(netuid) { + Some(new_emission_block_number) => assert!(new_emission_block_number > block_number), + None => assert!(LastEmissionBlockNumber::::get(netuid).is_some()), + } + }); +} From b38e79d1b0fce003a5224f215a49da89a2b13e99 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 20 Mar 2025 22:58:28 +0800 Subject: [PATCH 040/121] add doc --- pallets/subtensor/src/macros/dispatches.rs | 7 
++++++- pallets/subtensor/src/subnets/subnet.rs | 21 ++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 51d5dcecf0..5b9eb14e1d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1912,6 +1912,12 @@ mod dispatches { /// Initiates a call on a subnet. /// + /// # Arguments + /// * `origin` - The origin of the call, which must be signed by the subnet owner. + /// * `netuid` - The unique identifier of the subnet on which the call is being initiated. + /// + /// # Events + /// Emits a `CallInitiated` event on success. #[pallet::call_index(92)] #[pallet::weight(( Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), @@ -1920,7 +1926,6 @@ mod dispatches { ))] pub fn start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { Self::do_start_call(origin, netuid)?; - Ok(()) } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 3f6a58371f..fc29703cc7 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -321,8 +321,27 @@ impl Pallet { } } - /// Initiates a call on a subnet. + /// Execute the start call for a subnet. /// + /// This function is used to trigger the start call process for a subnet identified by `netuid`. + /// It ensures that the subnet exists, the caller is the subnet owner, + /// and the last emission block number has not been set yet. + /// It then sets the last emission block number to the current block number. + /// + /// # Parameters + /// + /// * `origin`: The origin of the call, which is used to ensure the caller is the subnet owner. + /// * `netuid`: The unique identifier of the subnet for which the start call process is being initiated. + /// + /// # Raises + /// + /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. 
+ /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. + /// * `Error::::LastEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. + /// + /// # Returns + /// + /// * `DispatchResult`: A result indicating the success or failure of the operation. pub fn do_start_call(origin: T::RuntimeOrigin, netuid: u16) -> DispatchResult { ensure!( Self::if_subnet_exist(netuid), From 0686b061df76bdc6422404975c2464634090051a Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 13:20:10 -0700 Subject: [PATCH 041/121] update actions version --- .github/workflows/docker-localnet.yml | 8 ++++---- .github/workflows/docker.yml | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index cd6460bfe4..c2afccae66 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -45,20 +45,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . 
file: Dockerfile-localnet diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2b36e37282..3eb52ab86f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -47,20 +47,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . push: true @@ -93,20 +93,20 @@ jobs: ref: ${{ env.ref }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . 
push: true From 533c291aedd6c29fc2de71b6c847e06c5a072d50 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 13:44:36 -0700 Subject: [PATCH 042/121] upload docker image to artifacts --- .github/workflows/check-docker-localnet.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-docker-localnet.yml b/.github/workflows/check-docker-localnet.yml index 126b718d8c..8cecb08ccb 100644 --- a/.github/workflows/check-docker-localnet.yml +++ b/.github/workflows/check-docker-localnet.yml @@ -12,10 +12,19 @@ jobs: uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build Docker Image run: docker build -f Dockerfile-localnet -t localnet . + + - name: Save Docker Image as Tar + run: docker save -o subtensor-localnet.tar localnet + + - name: Upload Docker Image as Artifact + uses: actions/upload-artifact@v4 + with: + name: subtensor-localnet + path: subtensor-localnet.tar From 498d87ee5bcb4d50146b1954a24324180b6b4d94 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 13:45:21 -0700 Subject: [PATCH 043/121] add `on.workflow_run` trigger --- .github/workflows/check-btcli-tests.yml | 26 +++++-------------------- .github/workflows/check-sdk-tests.yml | 26 +++++-------------------- 2 files changed, 10 insertions(+), 42 deletions(-) diff --git a/.github/workflows/check-btcli-tests.yml b/.github/workflows/check-btcli-tests.yml index 1307774742..73d0777695 100644 --- a/.github/workflows/check-btcli-tests.yml +++ b/.github/workflows/check-btcli-tests.yml @@ -9,6 +9,11 @@ concurrency: cancel-in-progress: true on: + workflow_run: + workflows: ["Build Localnet Docker Image"] + types: + - completed + pull_request: branches: - devnet @@ -97,32 +102,11 @@ jobs: echo "::set-output name=test-files::$test_files" shell: bash - pull-docker-image: - 
needs: check-labels - runs-on: ubuntu-latest - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' - steps: - - name: Log in to GitHub Container Registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $GITHUB_ACTOR --password-stdin - - - name: Pull Docker Image - run: docker pull ghcr.io/opentensor/subtensor-localnet:latest - - - name: Save Docker Image to Cache - run: docker save -o subtensor-localnet.tar ghcr.io/opentensor/subtensor-localnet:latest - - - name: Upload Docker Image as Artifact - uses: actions/upload-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar - # main job run-e2e-tests: needs: - check-labels - find-e2e-tests - - pull-docker-image if: always() && needs.check-labels.outputs.run-cli-tests == 'true' runs-on: ubuntu-latest diff --git a/.github/workflows/check-sdk-tests.yml b/.github/workflows/check-sdk-tests.yml index d54308c17b..e590233af9 100644 --- a/.github/workflows/check-sdk-tests.yml +++ b/.github/workflows/check-sdk-tests.yml @@ -9,6 +9,11 @@ concurrency: cancel-in-progress: true on: + workflow_run: + workflows: [ "Build Localnet Docker Image" ] + types: + - completed + pull_request: branches: - devnet @@ -96,32 +101,11 @@ jobs: echo "::set-output name=test-files::$test_files" shell: bash - pull-docker-image: - needs: check-labels - runs-on: ubuntu-latest - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - steps: - - name: Log in to GitHub Container Registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $GITHUB_ACTOR --password-stdin - - - name: Pull Docker Image - run: docker pull ghcr.io/opentensor/subtensor-localnet:devnet-ready - - - name: Save Docker Image to Cache - run: docker save -o subtensor-localnet.tar ghcr.io/opentensor/subtensor-localnet:devnet-ready - - - name: Upload Docker Image as Artifact - uses: actions/upload-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar - # main job run-e2e-tests: 
needs: - check-labels - find-e2e-tests - - pull-docker-image if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' runs-on: ubuntu-latest From c6119665e7f1c438dbe0799a1c49d234b9c109c0 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:26:50 -0700 Subject: [PATCH 044/121] add actions/download-artifact name --- .github/workflows/check-btcli-tests.yml | 1 + .github/workflows/check-sdk-tests.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/check-btcli-tests.yml b/.github/workflows/check-btcli-tests.yml index 73d0777695..ea554ebbb3 100644 --- a/.github/workflows/check-btcli-tests.yml +++ b/.github/workflows/check-btcli-tests.yml @@ -164,6 +164,7 @@ jobs: uses: actions/download-artifact@v4 with: name: subtensor-localnet + path: subtensor-localnet.tar - name: Load Docker Image run: docker load -i subtensor-localnet.tar diff --git a/.github/workflows/check-sdk-tests.yml b/.github/workflows/check-sdk-tests.yml index e590233af9..8c01c819a6 100644 --- a/.github/workflows/check-sdk-tests.yml +++ b/.github/workflows/check-sdk-tests.yml @@ -163,6 +163,7 @@ jobs: uses: actions/download-artifact@v4 with: name: subtensor-localnet + path: subtensor-localnet.tar - name: Load Docker Image run: docker load -i subtensor-localnet.tar From eed9be646095258fafff1d6e61207fe3e97839ee Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:49:16 -0700 Subject: [PATCH 045/121] delete, moved this logic to e2e related workflow --- .github/workflows/docker-localnet.yml | 69 --------------------------- 1 file changed, 69 deletions(-) delete mode 100644 .github/workflows/docker-localnet.yml diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml deleted file mode 100644 index c2afccae66..0000000000 --- a/.github/workflows/docker-localnet.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Publish Localnet Docker Image - -on: - release: - types: [published] - workflow_dispatch: - inputs: - branch-or-tag: - description: 
"Branch or tag to use for the Docker image tag and ref to checkout (optional)" - required: false - default: "" - push: - branches: - - devnet-ready - -permissions: - contents: read - packages: write - actions: read - security-events: write - -jobs: - publish: - runs-on: SubtensorCI - - steps: - - name: Determine Docker tag and ref - id: tag - run: | - branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "Determined branch or tag: $branch_or_tag" - echo "tag=$branch_or_tag" >> $GITHUB_ENV - echo "ref=$branch_or_tag" >> $GITHUB_ENV - - # Check if this is a tagged release (not devnet-ready/devnet/testnet) - if [[ "$branch_or_tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_ENV - else - echo "latest_tag=false" >> $GITHUB_ENV - fi - - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.ref }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push Docker image - uses: docker/build-push-action@v6 - with: - context: . 
- file: Dockerfile-localnet - push: true - platforms: linux/amd64,linux/arm64 - tags: | - ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} - ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} From ce18e25c7f7f1e8ad4d0bf5b732ae5680722437d Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:49:43 -0700 Subject: [PATCH 046/121] combine 2 workflow to one --- ....yml => check-bittensor-e2e-tests.yml.yml} | 158 ++++++++++++++-- .github/workflows/check-sdk-tests.yml | 175 ------------------ 2 files changed, 138 insertions(+), 195 deletions(-) rename .github/workflows/{check-btcli-tests.yml => check-bittensor-e2e-tests.yml.yml} (51%) delete mode 100644 .github/workflows/check-sdk-tests.yml diff --git a/.github/workflows/check-btcli-tests.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml similarity index 51% rename from .github/workflows/check-btcli-tests.yml rename to .github/workflows/check-bittensor-e2e-tests.yml.yml index ea554ebbb3..e8c679a14f 100644 --- a/.github/workflows/check-btcli-tests.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -9,11 +9,6 @@ concurrency: cancel-in-progress: true on: - workflow_run: - workflows: ["Build Localnet Docker Image"] - types: - - completed - pull_request: branches: - devnet @@ -50,14 +45,14 @@ jobs: uses: actions-ecosystem/action-add-labels@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - labels: run-bittensor-cli-tests + labels: run-bittensor-e2e-tests - check-labels: + check-label: needs: apply-label-to-new-pr runs-on: ubuntu-latest if: always() outputs: - run-cli-tests: ${{ steps.get-labels.outputs.run-cli-tests }} + run-bittensor-e2e-tests: ${{ steps.get-labels.outputs.run-bittensor-e2e-tests }} steps: - name: Check out repository uses: actions/checkout@v4 @@ -67,18 +62,18 @@ jobs: run: | LABELS=$(gh pr view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') echo "Current labels: $LABELS" - if echo "$LABELS" | 
grep -q "run-bittensor-cli-tests"; then - echo "run-cli-tests=true" >> $GITHUB_ENV - echo "::set-output name=run-cli-tests::true" + if echo "$LABELS" | grep -q "run-bittensor-e2e-tests"; then + echo "run-bittensor-e2e-tests=true" >> $GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::true" else - echo "run-cli-tests=false" >> $GITHUB_ENV - echo "::set-output name=run-cli-tests::false" + echo "run-bittensor-e2e-tests=false" >> $GITHUB_ENV + echo "::set-output name=run-bittensor-e2e-tests::false" fi env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - find-e2e-tests: - needs: check-labels + find-btcli-e2e-tests: + needs: check-label if: always() && needs.check-labels.outputs.run-cli-tests == 'true' runs-on: ubuntu-latest outputs: @@ -102,12 +97,62 @@ jobs: echo "::set-output name=test-files::$test_files" shell: bash - # main job - run-e2e-tests: - needs: - - check-labels - - find-e2e-tests + find-sdk-e2e-tests: + needs: check-label + if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' + runs-on: ubuntu-latest + outputs: + test-files: ${{ steps.get-tests.outputs.test-files }} + steps: + - name: Research preparation + working-directory: ${{ github.workspace }} + run: git clone https://github.com/opentensor/bittensor.git + + - name: Checkout + working-directory: ${{ github.workspace }}/bittensor + run: git checkout staging + + - name: Install dependencies + run: sudo apt-get install -y jq + + - name: Find e2e test files + id: get-tests + run: | + test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') + echo "::set-output name=test-files::$test_files" + shell: bash + + build-image: + needs: check-label + runs-on: SubtensorCI + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker Image + run: docker build -f Dockerfile-localnet -t localnet . + + - name: Save Docker Image as Tar + run: docker save -o subtensor-localnet.tar localnet + + - name: Upload Docker Image as Artifact + uses: actions/upload-artifact@v4 + with: + name: subtensor-localnet + path: subtensor-localnet.tar + + # main btcli job + run-btcli-e2e-tests: + needs: + - check-label + - find-btcli-e2e-tests + - build-image if: always() && needs.check-labels.outputs.run-cli-tests == 'true' runs-on: ubuntu-latest strategy: @@ -174,3 +219,76 @@ jobs: run: | source ${{ github.workspace }}/venv/bin/activate uv run pytest ${{ matrix.test-file }} -s + + # main sdk job + run-sdk-e2e-tests: + needs: + - check-label + - find-sdk-e2e-tests + - build-image + if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' + runs-on: ubuntu-latest + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rust-branch: + - stable + rust-target: + - x86_64-unknown-linux-gnu + os: + - ubuntu-latest + test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} + + env: + RELEASE_NAME: development + RUSTV: ${{ matrix.rust-branch }} + RUST_BACKTRACE: full + RUST_BIN_DIR: target/${{ matrix.rust-target }} + TARGET: ${{ matrix.rust-target }} + + timeout-minutes: 60 + name: "sdk: ${{ matrix.test-file }}" + steps: + - name: Check-out repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }} + run: uv venv ${{ github.workspace }}/venv + + - name: Clone Bittensor SDK repo + working-directory: ${{ github.workspace }} + run: git 
clone https://github.com/opentensor/bittensor.git + + - name: Setup Bittensor SDK from cloned repo + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + git checkout staging + git fetch origin staging + uv run --active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run --active pip install pytest + + - name: Install uv dependencies + working-directory: ${{ github.workspace }}/bittensor + run: uv sync --all-extras --dev + + - name: Download Cached Docker Image + uses: actions/download-artifact@v4 + with: + name: subtensor-localnet + path: subtensor-localnet.tar + + - name: Load Docker Image + run: docker load -i subtensor-localnet.tar + + - name: Run tests + working-directory: ${{ github.workspace }}/bittensor + run: | + source ${{ github.workspace }}/venv/bin/activate + uv run pytest ${{ matrix.test-file }} -s \ No newline at end of file diff --git a/.github/workflows/check-sdk-tests.yml b/.github/workflows/check-sdk-tests.yml deleted file mode 100644 index 8c01c819a6..0000000000 --- a/.github/workflows/check-sdk-tests.yml +++ /dev/null @@ -1,175 +0,0 @@ -name: Bittensor SDK Test - -permissions: - pull-requests: write - contents: read - -concurrency: - group: e2e-sdk-${{ github.ref }} - cancel-in-progress: true - -on: - workflow_run: - workflows: [ "Build Localnet Docker Image" ] - types: - - completed - - pull_request: - branches: - - devnet - - devnet-ready - - testnet - - testnet-ready - - main - types: [opened, synchronize, reopened, labeled, unlabeled] - -env: - CARGO_TERM_COLOR: always - VERBOSE: ${{ github.event.inputs.verbose }} - -jobs: - apply-label-to-new-pr: - runs-on: ubuntu-latest - if: ${{ github.event.pull_request.draft == false }} - outputs: - should_continue_sdk: ${{ steps.check.outputs.should_continue_sdk }} - steps: - - name: Check - id: check - run: | - ACTION="${{ github.event.action }}" - if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then - echo 
"should_continue_sdk=true" >> $GITHUB_OUTPUT - else - echo "should_continue_sdk=false" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Add label - if: steps.check.outputs.should_continue_sdk == 'true' - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - labels: run-bittensor-sdk-tests - - check-labels: - needs: apply-label-to-new-pr - runs-on: ubuntu-latest - if: always() - outputs: - run-sdk-tests: ${{ steps.get-labels.outputs.run-sdk-tests }} - steps: - - name: Check out repository - uses: actions/checkout@v4 - - - name: Get labels from PR - id: get-labels - run: | - sleep 5 - LABELS=$(gh api repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels --jq '.[].name') - echo "Current labels: $LABELS" - if echo "$LABELS" | grep -q "run-bittensor-sdk-tests"; then - echo "run-sdk-tests=true" >> $GITHUB_OUTPUT - else - echo "run-sdk-tests=false" >> $GITHUB_OUTPUT - fi - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - find-e2e-tests: - needs: check-labels - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - runs-on: ubuntu-latest - outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} - steps: - - name: Research preparation - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/bittensor.git - - - name: Checkout - working-directory: ${{ github.workspace }}/bittensor - run: git checkout staging - - - name: Install dependencies - run: sudo apt-get install -y jq - - - name: Find e2e test files - id: get-tests - run: | - test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') - echo "::set-output name=test-files::$test_files" - shell: bash - - # main job - run-e2e-tests: - needs: - - check-labels - - find-e2e-tests - - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' - runs-on: ubuntu-latest - strategy: - fail-fast: false - max-parallel: 16 - matrix: - rust-branch: - - stable - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} - - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} - - timeout-minutes: 60 - name: "sdk: ${{ matrix.test-file }}" - steps: - - name: Check-out repository - uses: actions/checkout@v4 - - - name: Install uv - uses: astral-sh/setup-uv@v5 - - - name: Create Python virtual environment - working-directory: ${{ github.workspace }} - run: uv venv ${{ github.workspace }}/venv - - - name: Clone Bittensor SDK repo - working-directory: ${{ github.workspace }} - run: git clone https://github.com/opentensor/bittensor.git - - - name: Setup Bittensor SDK from cloned repo - working-directory: ${{ github.workspace }}/bittensor - run: | - source ${{ github.workspace }}/venv/bin/activate - git checkout staging - git fetch origin staging - uv run --active pip install --upgrade pip - uv run --active pip install '.[dev]' - uv run --active pip install pytest - - - name: Install uv dependencies - working-directory: ${{ github.workspace }}/bittensor - run: uv sync --all-extras --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar - - - name: Load Docker Image - run: docker load -i subtensor-localnet.tar - - - name: Run tests - working-directory: ${{ github.workspace }}/bittensor - run: | - source ${{ github.workspace }}/venv/bin/activate - uv run pytest ${{ matrix.test-file }} -s From 
00c03cbf21eb08eac6029f8f82db9941b5a002a5 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:51:51 -0700 Subject: [PATCH 047/121] swap --- .github/workflows/check-docker-localnet.yml | 30 --------- .github/workflows/docker-localnet.yml | 69 +++++++++++++++++++++ 2 files changed, 69 insertions(+), 30 deletions(-) delete mode 100644 .github/workflows/check-docker-localnet.yml create mode 100644 .github/workflows/docker-localnet.yml diff --git a/.github/workflows/check-docker-localnet.yml b/.github/workflows/check-docker-localnet.yml deleted file mode 100644 index 8cecb08ccb..0000000000 --- a/.github/workflows/check-docker-localnet.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: Build Localnet Docker Image - -on: - pull_request: - -jobs: - build: - runs-on: SubtensorCI - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build Docker Image - run: docker build -f Dockerfile-localnet -t localnet . 
- - - name: Save Docker Image as Tar - run: docker save -o subtensor-localnet.tar localnet - - - name: Upload Docker Image as Artifact - uses: actions/upload-artifact@v4 - with: - name: subtensor-localnet - path: subtensor-localnet.tar diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml new file mode 100644 index 0000000000..c2afccae66 --- /dev/null +++ b/.github/workflows/docker-localnet.yml @@ -0,0 +1,69 @@ +name: Publish Localnet Docker Image + +on: + release: + types: [published] + workflow_dispatch: + inputs: + branch-or-tag: + description: "Branch or tag to use for the Docker image tag and ref to checkout (optional)" + required: false + default: "" + push: + branches: + - devnet-ready + +permissions: + contents: read + packages: write + actions: read + security-events: write + +jobs: + publish: + runs-on: SubtensorCI + + steps: + - name: Determine Docker tag and ref + id: tag + run: | + branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" + echo "Determined branch or tag: $branch_or_tag" + echo "tag=$branch_or_tag" >> $GITHUB_ENV + echo "ref=$branch_or_tag" >> $GITHUB_ENV + + # Check if this is a tagged release (not devnet-ready/devnet/testnet) + if [[ "$branch_or_tag" != "devnet-ready" ]]; then + echo "latest_tag=true" >> $GITHUB_ENV + else + echo "latest_tag=false" >> $GITHUB_ENV + fi + + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.ref }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: . 
+ file: Dockerfile-localnet + push: true + platforms: linux/amd64,linux/arm64 + tags: | + ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} + ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} From bdd5bccd5fe8616c28efde57539c5e0b79c8f4fe Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:56:03 -0700 Subject: [PATCH 048/121] fix ids --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index e8c679a14f..f60bec376e 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -74,10 +74,10 @@ jobs: find-btcli-e2e-tests: needs: check-label - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' runs-on: ubuntu-latest outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} + test-files: ${{ steps.get-btcli-tests.outputs.test-files }} steps: - name: Research preparation working-directory: ${{ github.workspace }} @@ -91,7 +91,7 @@ jobs: run: sudo apt-get install -y jq - name: Find e2e test files - id: get-tests + id: get-btcli-tests run: | test_files=$(find ${{ github.workspace }}/btcli/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. 
!= ""))') echo "::set-output name=test-files::$test_files" @@ -99,10 +99,10 @@ jobs: find-sdk-e2e-tests: needs: check-label - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' + if: always() && needs.check-labels.outputs.run-bittensor-e2e-tests == 'true' runs-on: ubuntu-latest outputs: - test-files: ${{ steps.get-tests.outputs.test-files }} + test-files: ${{ steps.get-sdk-tests.outputs.test-files }} steps: - name: Research preparation working-directory: ${{ github.workspace }} @@ -116,7 +116,7 @@ jobs: run: sudo apt-get install -y jq - name: Find e2e test files - id: get-tests + id: get-sdk-tests run: | test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') echo "::set-output name=test-files::$test_files" From 4030e7c1d17cf51bc8525c6f0a54abd2377deb0d Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 14:59:12 -0700 Subject: [PATCH 049/121] fixes --- .../check-bittensor-e2e-tests.yml.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index f60bec376e..d371b9a32d 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -1,4 +1,4 @@ -name: Bittensor BTCLI Test +name: Bittensor Bittensor E2E Test permissions: pull-requests: write @@ -27,21 +27,21 @@ jobs: runs-on: ubuntu-latest if: ${{ github.event.pull_request.draft == false }} outputs: - should_continue_cli: ${{ steps.check.outputs.should_continue_cli }} + should_continue: ${{ steps.check.outputs.should_continue }} steps: - name: Check id: check run: | ACTION="${{ github.event.action }}" if [[ "$ACTION" == "opened" || "$ACTION" == "reopened" ]]; then - echo "should_continue_cli=true" >> $GITHUB_OUTPUT + echo "should_continue=true" >> $GITHUB_OUTPUT else - echo "should_continue_cli=false" >> 
$GITHUB_OUTPUT + echo "should_continue=false" >> $GITHUB_OUTPUT fi shell: bash - name: Add label - if: steps.check.outputs.should_continue_cli == 'true' + if: steps.check.outputs.should_continue == 'true' uses: actions-ecosystem/action-add-labels@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -99,7 +99,7 @@ jobs: find-sdk-e2e-tests: needs: check-label - if: always() && needs.check-labels.outputs.run-bittensor-e2e-tests == 'true' + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' runs-on: ubuntu-latest outputs: test-files: ${{ steps.get-sdk-tests.outputs.test-files }} @@ -122,7 +122,7 @@ jobs: echo "::set-output name=test-files::$test_files" shell: bash - build-image: + build-image-with-current-branch: needs: check-label runs-on: SubtensorCI steps: @@ -152,7 +152,7 @@ jobs: needs: - check-label - find-btcli-e2e-tests - - build-image + - build-image-with-current-branch if: always() && needs.check-labels.outputs.run-cli-tests == 'true' runs-on: ubuntu-latest strategy: @@ -225,7 +225,7 @@ jobs: needs: - check-label - find-sdk-e2e-tests - - build-image + - build-image-with-current-branch if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' runs-on: ubuntu-latest strategy: From 91d61edbdce4f2269c4f740240f98960206657c4 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 15:14:37 -0700 Subject: [PATCH 050/121] add run-bittensor-e2e-tests output --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index d371b9a32d..fb43b5889e 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -153,7 +153,7 @@ jobs: - check-label - find-btcli-e2e-tests - build-image-with-current-branch - if: always() && needs.check-labels.outputs.run-cli-tests == 'true' + if: always() && 
needs.check-label.outputs.run-bittensor-e2e-tests == 'true' runs-on: ubuntu-latest strategy: fail-fast: false @@ -226,7 +226,7 @@ jobs: - check-label - find-sdk-e2e-tests - build-image-with-current-branch - if: always() && needs.check-labels.outputs.run-sdk-tests == 'true' + if: always() && needs.check-label.outputs.run-bittensor-e2e-tests == 'true' runs-on: ubuntu-latest strategy: fail-fast: false From f84be89aa3f4ab32523ea03745df67e1b98af1c5 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 15:30:43 -0700 Subject: [PATCH 051/121] fix needs reference in main steps --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index fb43b5889e..88afd7b32a 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -165,7 +165,7 @@ jobs: - x86_64-unknown-linux-gnu os: - ubuntu-latest - test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} + test-file: ${{ fromJson(needs.find-btcli-e2e-tests.outputs.test-files) }} env: RELEASE_NAME: development @@ -238,7 +238,7 @@ jobs: - x86_64-unknown-linux-gnu os: - ubuntu-latest - test-file: ${{ fromJson(needs.find-e2e-tests.outputs.test-files) }} + test-file: ${{ fromJson(needs.find-sdk-e2e-tests.outputs.test-files) }} env: RELEASE_NAME: development From 397e187543e139d9e2ff02872dd7b72b9317d7f2 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 15:47:50 -0700 Subject: [PATCH 052/121] remove path --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index 88afd7b32a..53ce2dd390 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -282,7 
+282,6 @@ jobs: uses: actions/download-artifact@v4 with: name: subtensor-localnet - path: subtensor-localnet.tar - name: Load Docker Image run: docker load -i subtensor-localnet.tar From e31c45265f7154cf25b4371d3168dc649bed7c91 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 20 Mar 2025 16:06:00 -0700 Subject: [PATCH 053/121] remove path for cli step --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index 53ce2dd390..1a574eb1d8 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -209,7 +209,6 @@ jobs: uses: actions/download-artifact@v4 with: name: subtensor-localnet - path: subtensor-localnet.tar - name: Load Docker Image run: docker load -i subtensor-localnet.tar From 2b674c71abb17c63a495924289429010751c1fe0 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Thu, 20 Mar 2025 22:07:52 -0400 Subject: [PATCH 054/121] reset bonds for miners recently deregistered --- pallets/subtensor/src/epoch/math.rs | 38 +++++ pallets/subtensor/src/epoch/run_epoch.rs | 34 ++++- pallets/subtensor/src/tests/epoch.rs | 180 +++++++++++++++++++++++ pallets/subtensor/src/tests/math.rs | 39 +++++ 4 files changed, 285 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 436dba84a0..b4f23ced83 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -549,6 +549,24 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { }); } +// Apply column mask to matrix, mask=true will mask out, i.e. set to 0. +// Assumes each column has the same length. 
+#[allow(dead_code)] +pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { + let Some(first_row) = matrix.first() else { + return; + }; + assert_eq!(mask.len(), first_row.len()); + let zero: I32F32 = I32F32::saturating_from_num(0); + matrix.iter_mut().for_each(|row_elem| { + row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { + if *mask_col { + *elem = zero; + } + }); + }); +} + // Mask out the diagonal of the input matrix in-place. #[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut [Vec]) { @@ -569,6 +587,26 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { }); } +// Remove cells from sparse matrix where the mask function of a scalar and a vector is true. +#[allow(dead_code, clippy::indexing_slicing)] +pub fn scalar_vec_mask_sparse_matrix( + sparse_matrix: &[Vec<(u16, I32F32)>], + scalar: u64, + vector: &[u64], + mask_fn: &dyn Fn(u64, u64) -> bool, +) -> Vec> { + let n: usize = sparse_matrix.len(); + let mut result: Vec> = vec![vec![]; n]; + for (i, sparse_row) in sparse_matrix.iter().enumerate() { + for (j, value) in sparse_row { + if !mask_fn(scalar, vector[*j as usize]) { + result[i].push((*j, *value)); + } + } + } + result +} + // Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index. #[allow(dead_code)] pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: u16) { diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 27dd17fa0b..62027f9636 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -22,6 +22,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block:\n{:?}\n", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo: {:?}", tempo); + // Get activity cutoff. 
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff); @@ -44,7 +48,7 @@ impl Pallet { let block_at_registration: Vec = Self::get_block_at_registration(netuid); log::trace!("Block at registration:\n{:?}\n", &block_at_registration); - // Outdated matrix, updated_ij=True if i has last updated (weights) after j has last registered. + // Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered. let outdated: Vec> = last_update .iter() .map(|updated| { @@ -56,6 +60,16 @@ impl Pallet { .collect(); log::trace!("Outdated:\n{:?}\n", &outdated); + // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + let recently_registered: Vec = block_at_registration + .iter() + .map(|registered| last_tempo <= *registered) + .collect(); + log::trace!("Recently registered:\n{:?}\n", &recently_registered); + // =========== // == Stake == // =========== @@ -185,7 +199,8 @@ impl Pallet { // Access network bonds. let mut bonds: Vec> = Self::get_bonds(netuid); - inplace_mask_matrix(&outdated, &mut bonds); // mask outdated bonds + // Remove bonds referring to neurons that have registered since last tempo. + inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 log::trace!("B:\n{:?}\n", &bonds); @@ -386,6 +401,10 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::trace!("current_block: {:?}", current_block); + // Get tempo. + let tempo: u64 = Self::get_tempo(netuid).into(); + log::trace!("tempo:\n{:?}\n", tempo); + // Get activity cutoff. 
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; log::trace!("activity_cutoff: {:?}", activity_cutoff); @@ -548,12 +567,15 @@ impl Pallet { let mut bonds: Vec> = Self::get_bonds_sparse(netuid); log::trace!("B: {:?}", &bonds); - // Remove bonds referring to deregistered neurons. - bonds = vec_mask_sparse_matrix( + // Remove bonds referring to neurons that have registered since last tempo. + // Mask if: the last tempo block happened *before* the registration block + // ==> last_tempo <= registered + let last_tempo: u64 = current_block.saturating_sub(tempo); + bonds = scalar_vec_mask_sparse_matrix( &bonds, - &last_update, + last_tempo, &block_at_registration, - &|updated, registered| updated <= registered, + &|last_tempo, registered| last_tempo <= registered, ); log::trace!("B (outdatedmask): {:?}", &bonds); diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 38b104ac2b..e7066feee9 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -2124,6 +2124,186 @@ fn test_zero_weights() { }); } +// Test that recently/deregistered miner bonds are cleared before EMA. 
+#[test] +fn test_deregistered_miner_bonds() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 4; + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + + let stake: u64 = 1; + add_network(netuid, high_tempo, 0); + SubtensorModule::set_max_allowed_uids(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_min_allowed_weights(netuid, 0); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + SubtensorModule::set_bonds_penalty(netuid, u16::MAX); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Register [validator1, validator2, server1, server2] + let block_number = System::block_number(); + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), + ); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &U256::from(key), + &U256::from(key), + netuid, + stake, + ); + } + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), n); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + + // === Issue validator permits + SubtensorModule::set_max_allowed_validators(netuid, n); + assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); + SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); + next_block(); // run to next block to ensure weights 
are set on nodes after their registration block + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + + // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3] + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run 2 blocks + next_block(); + next_block(); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2 = bonds[0][2]; + let bond_0_3 = bonds[0][3]; + + // Non-zero bonds + assert!(bond_0_2 > 0); + assert!(bond_0_3 > 0); + + // Set tempo high so we don't automatically run epochs + SubtensorModule::set_tempo(netuid, high_tempo); + + // Run one more block + next_block(); + + // === Dereg server2 at uid3 (least emission) + register new key over uid3 + let new_key: u64 = n as u64; // register a new key while at max capacity, which means the least incentive uid will be deregistered + let block_number = System::block_number(); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 0, + &U256::from(new_key), + ); + assert_eq!(SubtensorModule::get_max_registrations_per_block(netuid), n); + assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(new_key)), + netuid, + block_number, + nonce, + work, + U256::from(new_key), + U256::from(new_key) + )); + let deregistered_uid: u16 = n - 1; // since uid=n-1 only recieved 1/3 of weight, it will get 
pruned first + assert_eq!( + U256::from(new_key), + SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) + .expect("Not registered") + ); + + // Set weights again so they're active. + for uid in 0..(n / 2) as u64 { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![2 * (u16::MAX / 3), u16::MAX / 3], + 0 + )); + } + + // Run 1 block + next_block(); + // Assert block at registration happened after the last tempo + let block_at_registration = SubtensorModule::get_neuron_block_at_registration(netuid, 3); + let block_number = System::block_number(); + assert!( + block_at_registration >= block_number - 2, + "block at registration: {}, block number: {}", + block_at_registration, + block_number + ); + + // set tempo to 2 blocks + SubtensorModule::set_tempo(netuid, 2); + // Run epoch again. + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check the bond values for the servers + let bonds = SubtensorModule::get_bonds(netuid); + let bond_0_2_new = bonds[0][2]; + let bond_0_3_new = bonds[0][3]; + + // We expect the old bonds for server2, (uid3), to be reset. + // For server1, (uid2), the bond should be higher than before. + assert!( + bond_0_2_new > bond_0_2, + "bond_0_2_new: {}, bond_0_2: {}", + bond_0_2_new, + bond_0_2 + ); + assert!( + bond_0_3_new <= bond_0_3, + "bond_0_3_new: {}, bond_0_3: {}", + bond_0_3_new, + bond_0_3 + ); + }); +} + // Test that epoch assigns validator permits to highest stake uids, varies uid interleaving and stake values. 
#[test] fn test_validator_permits() { diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index 036e2015ab..c70da2c9d2 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1220,6 +1220,45 @@ fn test_math_vec_mask_sparse_matrix() { ); } +#[test] +fn test_math_scalar_vec_mask_sparse_matrix() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 1; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a == b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a <= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let scalar: u64 = 5; + let masking_vector: Vec = vec![1, 4, 7]; + let result = scalar_vec_mask_sparse_matrix(&mat, scalar, &masking_vector, &|a, b| a >= b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + #[test] fn test_math_row_hadamard() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); From 4725f550d0d7905cb4a1f4e8cc6ef292f91e0740 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Thu, 20 Mar 2025 22:08:05 -0400 Subject: [PATCH 055/121] reset bonds 
from deregistered validator --- pallets/subtensor/src/subnets/uids.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index aaf3b5fe6b..c97252677c 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -23,6 +23,7 @@ impl Pallet { Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. From f7e153105eea32d5cd9b15d2d0bacc3501eb9115 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Thu, 20 Mar 2025 22:14:28 -0400 Subject: [PATCH 056/121] add tests --- pallets/subtensor/src/tests/uids.rs | 71 +++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 178613fbb6..92a8a64048 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -68,6 +68,7 @@ fn test_replace_neuron() { Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -138,6 +139,76 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip, 0); assert_eq!(axon_info.port, 0); assert_eq!(axon_info.ip_type, 0); + + // Check bonds are cleared. 
+ assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + }); +} + +#[test] +fn test_bonds_cleared_on_replace() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 1; + let tempo: u16 = 13; + let hotkey_account_id = U256::from(1); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 111111, + &hotkey_account_id, + ); + let coldkey_account_id = U256::from(1234); + + let new_hotkey_account_id = U256::from(2); + let _new_colkey_account_id = U256::from(12345); + + //add network + add_network(netuid, tempo, 0); + + // Register a neuron. + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work, + hotkey_account_id, + coldkey_account_id + )); + + // Get UID + let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id); + assert_ok!(neuron_uid); + let neuron_uid = neuron_uid.unwrap(); + + // set non-default bonds + Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + + // Replace the neuron. + SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); + + // Check old hotkey is not registered on any network. + assert!(SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id).is_err()); + assert!(!SubtensorModule::is_hotkey_registered_on_any_network( + &hotkey_account_id + )); + + let curr_hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, neuron_uid); + assert_ok!(curr_hotkey); + assert_ne!(curr_hotkey.unwrap(), hotkey_account_id); + + // Check new hotkey is registered on the network. + assert!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &new_hotkey_account_id).is_ok() + ); + assert!(SubtensorModule::is_hotkey_registered_on_any_network( + &new_hotkey_account_id + )); + assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); + + // Check bonds are cleared. 
+ assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); }); } From 213b7b1198a6144e622afa3d4ce83eaf39f2df18 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 01:30:17 -0400 Subject: [PATCH 057/121] use next_block/run_to_block _no_epoch --- pallets/subtensor/src/tests/epoch.rs | 71 ++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index e7066feee9..c112710175 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -982,6 +982,28 @@ fn test_512_graph_random_weights() { // }); // } +fn next_block_no_epoch(netuid: u16) -> u64 { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + let new_block = next_block(); + SubtensorModule::set_tempo(netuid, old_tempo); + + new_block +} + +fn run_to_block_no_epoch(netuid: u16, n: u64) { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + run_to_block(n); + SubtensorModule::set_tempo(netuid, old_tempo); +} + // Test bonds exponential moving average over a sequence of epochs. 
#[test] fn test_bonds() { @@ -989,7 +1011,7 @@ fn test_bonds() { let sparse: bool = true; let n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1018,7 +1040,7 @@ fn test_bonds() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!( SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch( netuid, 1_000_000_000 ); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val->srv1: 0.1, val->srv2: 0.2, val->srv3: 0.3, val->srv4: 0.4] for uid in 0..(n/2) as u64 { @@ -1068,7 +1090,8 @@ fn test_bonds() { // === Set self-weight only on val1 let uid = 0; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* n: 8 @@ -1115,7 +1138,8 @@ fn test_bonds() { // === Set self-weight only on val2 let uid = 1; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 3 @@ -1151,7 +1175,8 @@ fn test_bonds() { // === Set self-weight only on val3 let uid = 2; assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(uid)), netuid, vec![uid], vec![u16::MAX], 0)); - next_block(); + 
next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 4 @@ -1186,7 +1211,8 @@ fn test_bonds() { // === Set val3->srv4: 1 assert_ok!(SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(2)), netuid, vec![7], vec![u16::MAX], 0)); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 5 @@ -1219,7 +1245,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 6 @@ -1240,7 +1267,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 7 @@ -1261,7 +1289,8 @@ fn test_bonds() { assert_eq!(bonds[2][7], 49150); assert_eq!(bonds[3][7], 65535); - next_block(); + next_block_no_epoch(netuid); + if sparse { SubtensorModule::epoch( netuid, 1_000_000_000 ); } else { SubtensorModule::epoch_dense( netuid, 1_000_000_000 ); } /* current_block: 8 @@ -1286,7 +1315,7 @@ fn test_bonds_with_liquid_alpha() { let sparse: bool = true; let n: u16 = 8; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); @@ -1326,7 +1355,7 @@ fn test_bonds_with_liquid_alpha() { // Initilize with first epoch SubtensorModule::epoch(netuid, 1_000_000_000); - next_block(); + 
next_block_no_epoch(netuid); // Set weights for uid in 0..(n / 2) { @@ -1417,7 +1446,7 @@ fn test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1439,7 +1468,7 @@ fn test_bonds_with_liquid_alpha() { vec![u16::MAX], 0 )); - next_block(); + next_block_no_epoch(netuid); if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1543,7 +1572,7 @@ fn test_active_stake() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 1; let block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1586,7 +1615,7 @@ fn test_active_stake() { SubtensorModule::set_max_allowed_validators(netuid, n); assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators - next_block(); // run to next block to ensure weights are set on nodes after their registration block + next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block // === Set weights [val1->srv1: 0.5, val1->srv2: 0.5, val2->srv1: 0.5, val2->srv2: 0.5] for uid in 0..(n / 2) as u64 { @@ -1627,7 +1656,7 @@ fn test_active_stake() { } } let activity_cutoff: u64 = SubtensorModule::get_activity_cutoff(netuid) as u64; - run_to_block(activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 2); // run to block where validator (uid 0, 1) weights become outdated // === Update uid 0 weights assert_ok!(SubtensorModule::set_weights( @@ -1697,7 +1726,7 @@ fn test_active_stake() { vec![u16::MAX / (n / 2); (n / 2) as usize], 0 )); - run_to_block(activity_cutoff + 3); // run to block where validator 
(uid 0, 1) weights become outdated + run_to_block_no_epoch(netuid, activity_cutoff + 3); // run to block where validator (uid 0, 1) weights become outdated if sparse { SubtensorModule::epoch(netuid, 1_000_000_000); } else { @@ -1750,7 +1779,7 @@ fn test_outdated_weights() { let sparse: bool = true; let n: u16 = 4; let netuid: u16 = 1; - let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let tempo: u16 = 0; let mut block_number: u64 = System::block_number(); let stake: u64 = 1; add_network(netuid, tempo, 0); @@ -1796,7 +1825,7 @@ fn test_outdated_weights() { assert_eq!(SubtensorModule::get_max_allowed_validators(netuid), n); SubtensorModule::epoch(netuid, 1_000_000_000); // run first epoch to set allowed validators assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 4); - block_number = next_block(); // run to next block to ensure weights are set on nodes after their registration block + block_number = next_block_no_epoch(netuid); // run to next block to ensure weights are set on nodes after their registration block assert_eq!(SubtensorModule::get_registrations_this_block(netuid), 0); // === Set weights [val1->srv1: 2/3, val1->srv2: 1/3, val2->srv1: 2/3, val2->srv2: 1/3, srv1->srv1: 1, srv2->srv2: 1] @@ -1877,7 +1906,7 @@ fn test_outdated_weights() { SubtensorModule::get_hotkey_for_net_and_uid(netuid, deregistered_uid) .expect("Not registered") ); - next_block(); // run to next block to outdate weights and bonds set on deregistered uid + next_block_no_epoch(netuid); // run to next block to outdate weights and bonds set on deregistered uid // === Update weights from only uid=0 assert_ok!(SubtensorModule::set_weights( @@ -2290,7 +2319,7 @@ fn test_deregistered_miner_bonds() { // We expect the old bonds for server2, (uid3), to be reset. // For server1, (uid2), the bond should be higher than before. 
assert!( - bond_0_2_new > bond_0_2, + bond_0_2_new >= bond_0_2, "bond_0_2_new: {}, bond_0_2: {}", bond_0_2_new, bond_0_2 From cbca1c39579a67d57b3e301b999399ad73622ee1 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 01:30:26 -0400 Subject: [PATCH 058/121] chore: fmt --- pallets/subtensor/src/tests/epoch.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index c112710175..9c32c836c0 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -983,25 +983,25 @@ fn test_512_graph_random_weights() { // } fn next_block_no_epoch(netuid: u16) -> u64 { - // high tempo to skip automatic epochs in on_initialize - let high_tempo: u16 = u16::MAX - 1; - let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); - let new_block = next_block(); - SubtensorModule::set_tempo(netuid, old_tempo); + SubtensorModule::set_tempo(netuid, high_tempo); + let new_block = next_block(); + SubtensorModule::set_tempo(netuid, old_tempo); - new_block + new_block } fn run_to_block_no_epoch(netuid: u16, n: u64) { - // high tempo to skip automatic epochs in on_initialize - let high_tempo: u16 = u16::MAX - 1; - let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); - run_to_block(n); - SubtensorModule::set_tempo(netuid, old_tempo); + SubtensorModule::set_tempo(netuid, high_tempo); + run_to_block(n); + SubtensorModule::set_tempo(netuid, old_tempo); } // Test bonds exponential moving average over a sequence of epochs. 
From c86de44c0e23c6fcfb5db0e4e84aad51393d5e5f Mon Sep 17 00:00:00 2001 From: open-junius Date: Fri, 21 Mar 2025 15:04:36 +0800 Subject: [PATCH 059/121] benchmark file update --- pallets/subtensor/src/benchmarks.rs | 33 ++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 47dd7a5bc5..de8bd251f2 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -6,7 +6,7 @@ use crate::Pallet as Subtensor; use crate::*; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; -use frame_system::RawOrigin; +use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; pub use pallet::*; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Hash}; @@ -658,4 +658,35 @@ benchmark_burn_alpha { }: burn_alpha(RawOrigin::Signed(coldkey), hotkey, alpha_amount, netuid) + +benchmark_start_call { + let caller: T::AccountId = whitelisted_caller::>(); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + + // Set up coldkey and hotkey + let coldkey: T::AccountId = account("Test", 0, seed); + let hotkey: T::AccountId = account("Alice", 0, seed); + + // Initialize network + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + + // Register the neuron + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1000000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); + + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); + assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); + assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + let current_block: u64 = Subtensor::::get_current_block_as_u64(); + let duration = 
::DurationOfStartCall::get(); + let block: BlockNumberFor = (current_block + duration).try_into().ok().expect(""); + frame_system::Pallet::::set_block_number(block); + +}: start_call(RawOrigin::Signed(coldkey), netuid) + } From 187ed948b06523eb0aaa0fa693ec7bde5572ec63 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 21 Mar 2025 09:53:52 -0700 Subject: [PATCH 060/121] mark deprecated items --- pallets/commitments/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index a136a2a7e5..e838f81a30 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -123,12 +123,12 @@ pub mod pallet { } #[pallet::type_value] - /// Default value for commitment rate limit. + /// *DEPRECATED* Default value for commitment rate limit. pub fn DefaultRateLimit() -> BlockNumberFor { T::DefaultRateLimit::get() } - /// The rate limit for commitments + /// *DEPRECATED* The rate limit for commitments #[pallet::storage] pub type RateLimit = StorageValue<_, BlockNumberFor, ValueQuery, DefaultRateLimit>; From ef9a7afc00e0c5a1e74e3751844f0483867b2a17 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 21 Mar 2025 13:58:57 -0400 Subject: [PATCH 061/121] Adjust staking fees --- pallets/subtensor/src/staking/stake_utils.rs | 22 +++++++------- pallets/subtensor/src/tests/staking.rs | 13 ++++++--- pallets/subtensor/src/tests/staking2.rs | 30 -------------------- 3 files changed, 21 insertions(+), 44 deletions(-) diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 894a5a9132..262780320e 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -1075,21 +1075,13 @@ impl Pallet { pub(crate) fn calculate_staking_fee( origin: Option<(&T::AccountId, u16)>, _origin_coldkey: &T::AccountId, - destination: Option<(&T::AccountId, 
u16)>, + _destination: Option<(&T::AccountId, u16)>, _destination_coldkey: &T::AccountId, alpha_estimate: I96F32, ) -> u64 { match origin { // If origin is defined, we are removing/moving stake Some((origin_hotkey, origin_netuid)) => { - if let Some((_destination_hotkey, destination_netuid)) = destination { - // This is a stake move/swap/transfer - if destination_netuid == origin_netuid { - // If destination is on the same subnet, use the default fee - return DefaultStakingFee::::get(); - } - } - if origin_netuid == Self::get_root_netuid() || SubnetMechanism::::get(origin_netuid) == 0 { @@ -1097,7 +1089,7 @@ impl Pallet { DefaultStakingFee::::get() } else { // Otherwise, calculate the fee based on the alpha estimate - let fee = alpha_estimate + let mut fee = alpha_estimate .saturating_mul( I96F32::saturating_from_num(AlphaDividendsPerSubnet::::get( origin_netuid, @@ -1110,6 +1102,16 @@ impl Pallet { .saturating_mul(Self::get_alpha_price(origin_netuid)) // fee needs to be in TAO .saturating_to_num::(); + // 0.005% per epoch matches to 44% annual in compound interest. Do not allow the fee + // to be lower than that. 
(1.00005^(365*20) ~= 1.44) + let apr_20_percent = I96F32::saturating_from_num(0.00005); + fee = fee.max( + alpha_estimate + .saturating_mul(apr_20_percent) + .saturating_to_num::(), + ); + + // We should at least get DefaultStakingFee anyway fee.max(DefaultStakingFee::::get()) } } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 33d686a604..1fc4e7b590 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -2311,7 +2311,10 @@ fn test_remove_stake_fee_realistic_values() { ); // Estimate fees - let expected_fee: f64 = current_price * alpha_divs as f64; + let mut expected_fee: f64 = current_price * alpha_divs as f64; + if expected_fee < alpha_to_unstake as f64 * 0.00005 { + expected_fee = alpha_to_unstake as f64 * 0.00005; + } // Remove stake to measure fee let balance_before = SubtensorModule::get_coldkey_balance(&coldkey); @@ -3903,7 +3906,7 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { let coldkey_account_id = U256::from(81337); let amount = 10_000_000_000; let netuid: u16 = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); - let fee = DefaultStakingFee::::get(); + let mut fee = DefaultStakingFee::::get(); register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, 192213123); // Give it some $$$ in his coldkey balance @@ -3923,17 +3926,19 @@ fn test_remove_99_9991_per_cent_stake_removes_all() { &coldkey_account_id, netuid, ); + let remove_amount = (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::(); assert_ok!(SubtensorModule::remove_stake( RuntimeOrigin::signed(coldkey_account_id), hotkey_account_id, netuid, - (U64F64::from_num(alpha) * U64F64::from_num(0.999991)).to_num::() + remove_amount, )); // Check that all alpha was unstaked and all TAO balance was returned (less fees) + fee = fee + fee.max((remove_amount as f64 * 0.00005) as u64); assert_abs_diff_eq!( SubtensorModule::get_coldkey_balance(&coldkey_account_id), - 
amount - fee * 2, + amount - fee, epsilon = 10000, ); assert_eq!( diff --git a/pallets/subtensor/src/tests/staking2.rs b/pallets/subtensor/src/tests/staking2.rs index 57f880ffba..38cef90934 100644 --- a/pallets/subtensor/src/tests/staking2.rs +++ b/pallets/subtensor/src/tests/staking2.rs @@ -914,35 +914,5 @@ fn test_stake_fee_calculation() { I96F32::from_num(stake_amount), ); // Charged a dynamic fee assert_ne!(stake_fee_5, default_fee); - - // Test stake fee for move between hotkeys on non-root - let stake_fee_6 = SubtensorModule::calculate_staking_fee( - Some((&hotkey1, netuid0)), - &coldkey1, - Some((&hotkey2, netuid0)), - &coldkey1, - I96F32::from_num(stake_amount), - ); // Charge the default fee - assert_eq!(stake_fee_6, default_fee); - - // Test stake fee for move between coldkeys on non-root - let stake_fee_7 = SubtensorModule::calculate_staking_fee( - Some((&hotkey1, netuid0)), - &coldkey1, - Some((&hotkey1, netuid0)), - &coldkey2, - I96F32::from_num(stake_amount), - ); // Charge the default fee; stake did not leave the subnet. 
- assert_eq!(stake_fee_7, default_fee); - - // Test stake fee for *swap* from non-root to non-root - let stake_fee_8 = SubtensorModule::calculate_staking_fee( - Some((&hotkey1, netuid0)), - &coldkey1, - Some((&hotkey1, netuid1)), - &coldkey1, - I96F32::from_num(stake_amount), - ); // Charged a dynamic fee - assert_ne!(stake_fee_8, default_fee); }); } From 589f9f2f93e6f1124778137fe3d7de9a2a7d3db3 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 21 Mar 2025 14:10:45 -0400 Subject: [PATCH 062/121] Revert move fee --- pallets/subtensor/src/staking/stake_utils.rs | 10 ++++++- pallets/subtensor/src/tests/staking2.rs | 30 ++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 262780320e..53e8dbaa55 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -1075,13 +1075,21 @@ impl Pallet { pub(crate) fn calculate_staking_fee( origin: Option<(&T::AccountId, u16)>, _origin_coldkey: &T::AccountId, - _destination: Option<(&T::AccountId, u16)>, + destination: Option<(&T::AccountId, u16)>, _destination_coldkey: &T::AccountId, alpha_estimate: I96F32, ) -> u64 { match origin { // If origin is defined, we are removing/moving stake Some((origin_hotkey, origin_netuid)) => { + if let Some((_destination_hotkey, destination_netuid)) = destination { + // This is a stake move/swap/transfer + if destination_netuid == origin_netuid { + // If destination is on the same subnet, use the default fee + return DefaultStakingFee::::get(); + } + } + if origin_netuid == Self::get_root_netuid() || SubnetMechanism::::get(origin_netuid) == 0 { diff --git a/pallets/subtensor/src/tests/staking2.rs b/pallets/subtensor/src/tests/staking2.rs index 38cef90934..57f880ffba 100644 --- a/pallets/subtensor/src/tests/staking2.rs +++ b/pallets/subtensor/src/tests/staking2.rs @@ -914,5 +914,35 @@ fn test_stake_fee_calculation() { 
I96F32::from_num(stake_amount), ); // Charged a dynamic fee assert_ne!(stake_fee_5, default_fee); + + // Test stake fee for move between hotkeys on non-root + let stake_fee_6 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey2, netuid0)), + &coldkey1, + I96F32::from_num(stake_amount), + ); // Charge the default fee + assert_eq!(stake_fee_6, default_fee); + + // Test stake fee for move between coldkeys on non-root + let stake_fee_7 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid0)), + &coldkey2, + I96F32::from_num(stake_amount), + ); // Charge the default fee; stake did not leave the subnet. + assert_eq!(stake_fee_7, default_fee); + + // Test stake fee for *swap* from non-root to non-root + let stake_fee_8 = SubtensorModule::calculate_staking_fee( + Some((&hotkey1, netuid0)), + &coldkey1, + Some((&hotkey1, netuid1)), + &coldkey1, + I96F32::from_num(stake_amount), + ); // Charged a dynamic fee + assert_ne!(stake_fee_8, default_fee); }); } From b6833627af451f52ca0718c7d483325b24438232 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 16:53:26 -0400 Subject: [PATCH 063/121] add a pending alpha (to valis) for prop em --- .../subtensor/src/coinbase/run_coinbase.rs | 126 +++++++++++------- pallets/subtensor/src/tests/coinbase.rs | 2 + 2 files changed, 81 insertions(+), 47 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7836423868..0b85be3774 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -100,6 +100,52 @@ impl Pallet { log::debug!("alpha_in: {:?}", alpha_in); log::debug!("alpha_out: {:?}", alpha_out); + // --- 7. Drain pending emission through the subnet based on tempo. + for &netuid in subnets.iter() { + // Pass on subnets that have not reached their tempo. 
+ if Self::should_run_epoch(netuid, current_block) { + if let Err(e) = Self::reveal_crv3_commits(netuid) { + log::warn!( + "Failed to reveal commits for subnet {} due to error: {:?}", + netuid, + e + ); + }; + + // Restart counters. + BlocksSinceLastStep::::insert(netuid, 0); + LastMechansimStepBlock::::insert(netuid, current_block); + + // Get and drain the subnet pending emission. + let pending_alpha: u64 = PendingEmission::::get(netuid); + PendingEmission::::insert(netuid, 0); + + // Get and drain the subnet pending root divs. + let pending_tao: u64 = PendingRootDivs::::get(netuid); + PendingRootDivs::::insert(netuid, 0); + + // Get this amount as alpha that was swapped for pending root divs. + let pending_swapped: u64 = PendingAlphaSwapped::::get(netuid); + PendingAlphaSwapped::::insert(netuid, 0); + + // Get owner cut and drain. + let owner_cut: u64 = PendingOwnerCut::::get(netuid); + PendingOwnerCut::::insert(netuid, 0); + + // Drain pending root divs, alpha emission, and owner cut. + Self::drain_pending_emission( + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut, + ); + } else { + // Increment + BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); + } + } + // --- 4. Injection. // Actually perform the injection of alpha_in, alpha_out and tao_in into the subnet pool. // This operation changes the pool liquidity each block. @@ -203,52 +249,6 @@ impl Pallet { // Update moving prices after using them above. Self::update_moving_price(*netuid_i); } - - // --- 7. Drain pending emission through the subnet based on tempo. - for &netuid in subnets.iter() { - // Pass on subnets that have not reached their tempo. - if Self::should_run_epoch(netuid, current_block) { - if let Err(e) = Self::reveal_crv3_commits(netuid) { - log::warn!( - "Failed to reveal commits for subnet {} due to error: {:?}", - netuid, - e - ); - }; - - // Restart counters. 
- BlocksSinceLastStep::::insert(netuid, 0); - LastMechansimStepBlock::::insert(netuid, current_block); - - // Get and drain the subnet pending emission. - let pending_alpha: u64 = PendingEmission::::get(netuid); - PendingEmission::::insert(netuid, 0); - - // Get and drain the subnet pending root divs. - let pending_tao: u64 = PendingRootDivs::::get(netuid); - PendingRootDivs::::insert(netuid, 0); - - // Get this amount as alpha that was swapped for pending root divs. - let pending_swapped: u64 = PendingAlphaSwapped::::get(netuid); - PendingAlphaSwapped::::insert(netuid, 0); - - // Get owner cut and drain. - let owner_cut: u64 = PendingOwnerCut::::get(netuid); - PendingOwnerCut::::insert(netuid, 0); - - // Drain pending root divs, alpha emission, and owner cut. - Self::drain_pending_emission( - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut, - ); - } else { - // Increment - BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); - } - } } pub fn drain_pending_emission( @@ -295,9 +295,19 @@ impl Pallet { log::debug!("incentives: {:?}", incentives); log::debug!("dividends: {:?}", dividends); + // Compute the pending validator alpha. + // This is the total alpha being injected, + // minus the the alpha for the miners, (50%) + // and minus the alpha swapped for TAO (pending_swapped). + let pending_validator_alpha: u64 = pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped); + Self::distribute_dividends_and_incentives( netuid, pending_tao, + pending_validator_alpha, owner_cut, incentives, dividends, @@ -307,6 +317,7 @@ impl Pallet { pub fn distribute_dividends_and_incentives( netuid: u16, pending_tao: u64, + pending_alpha: u64, owner_cut: u64, incentives: BTreeMap, dividends: BTreeMap, @@ -317,6 +328,7 @@ impl Pallet { // Accumulate root divs and alpha_divs. 
For each hotkey we compute their // local and root dividend proportion based on their alpha_stake/root_stake let mut total_root_divs: I96F32 = asfloat!(0); + let mut total_alpha_divs: I96F32 = asfloat!(0); let mut root_dividends: BTreeMap = BTreeMap::new(); let mut alpha_dividends: BTreeMap = BTreeMap::new(); for (hotkey, dividend) in dividends { @@ -331,7 +343,7 @@ impl Pallet { let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); // Get total from root and local let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); - // Copmute root prop. + // Compute root prop. let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); // Compute root dividends let root_divs: I96F32 = dividend.saturating_mul(root_prop); @@ -342,6 +354,8 @@ impl Pallet { .entry(hotkey.clone()) .and_modify(|e| *e = e.saturating_add(alpha_divs)) .or_insert(alpha_divs); + // Accumulate total alpha divs. + total_alpha_divs = total_alpha_divs.saturating_add(alpha_divs); // Record the root dividends. root_dividends .entry(hotkey.clone()) @@ -371,6 +385,24 @@ impl Pallet { } log::debug!("tao_dividends: {:?}", tao_dividends); + // Compute proportional alpha divs using the pending alpha and total alpha divs from the epoch. + let mut prop_alpha_dividends: BTreeMap = BTreeMap::new(); + for (hotkey, alpha_divs) in alpha_dividends.iter() { + // Alpha proportion. + let alpha_share: I96F32 = alpha_divs.checked_div(total_alpha_divs).unwrap_or(zero); + log::debug!("hotkey: {:?}, alpha_share: {:?}", hotkey, alpha_share); + + // Compute the proportional pending_alpha to this hotkey. + let prop_alpha: I96F32 = asfloat!(pending_alpha).saturating_mul(alpha_share); + log::debug!("hotkey: {:?}, prop_alpha: {:?}", hotkey, prop_alpha); + // Record the proportional alpha dividends. 
+ prop_alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = prop_alpha) + .or_insert(prop_alpha); + } + log::debug!("prop_alpha_dividends: {:?}", prop_alpha_dividends); + // Distribute the owner cut. if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 9f59fe338e..99f95f2a74 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1454,6 +1454,7 @@ fn test_incentive_to_subnet_owner_is_burned() { let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); let pending_tao: u64 = 1_000_000_000; + let pending_alpha: u64 = 0; // None to valis let owner_cut: u64 = 0; let mut incentives: BTreeMap = BTreeMap::new(); let mut dividends: BTreeMap = BTreeMap::new(); @@ -1475,6 +1476,7 @@ fn test_incentive_to_subnet_owner_is_burned() { SubtensorModule::distribute_dividends_and_incentives( netuid, pending_tao, + pending_alpha, owner_cut, incentives, dividends, From ef17ae66d5c2840f2df7ccd35a6afca978cd839f Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 17:28:08 -0400 Subject: [PATCH 064/121] undo move epoch --- .../subtensor/src/coinbase/run_coinbase.rs | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 0b85be3774..346064fd8c 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -100,52 +100,6 @@ impl Pallet { log::debug!("alpha_in: {:?}", alpha_in); log::debug!("alpha_out: {:?}", alpha_out); - // --- 7. Drain pending emission through the subnet based on tempo. - for &netuid in subnets.iter() { - // Pass on subnets that have not reached their tempo. 
- if Self::should_run_epoch(netuid, current_block) { - if let Err(e) = Self::reveal_crv3_commits(netuid) { - log::warn!( - "Failed to reveal commits for subnet {} due to error: {:?}", - netuid, - e - ); - }; - - // Restart counters. - BlocksSinceLastStep::::insert(netuid, 0); - LastMechansimStepBlock::::insert(netuid, current_block); - - // Get and drain the subnet pending emission. - let pending_alpha: u64 = PendingEmission::::get(netuid); - PendingEmission::::insert(netuid, 0); - - // Get and drain the subnet pending root divs. - let pending_tao: u64 = PendingRootDivs::::get(netuid); - PendingRootDivs::::insert(netuid, 0); - - // Get this amount as alpha that was swapped for pending root divs. - let pending_swapped: u64 = PendingAlphaSwapped::::get(netuid); - PendingAlphaSwapped::::insert(netuid, 0); - - // Get owner cut and drain. - let owner_cut: u64 = PendingOwnerCut::::get(netuid); - PendingOwnerCut::::insert(netuid, 0); - - // Drain pending root divs, alpha emission, and owner cut. - Self::drain_pending_emission( - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut, - ); - } else { - // Increment - BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); - } - } - // --- 4. Injection. // Actually perform the injection of alpha_in, alpha_out and tao_in into the subnet pool. // This operation changes the pool liquidity each block. @@ -249,6 +203,52 @@ impl Pallet { // Update moving prices after using them above. Self::update_moving_price(*netuid_i); } + + // --- 7. Drain pending emission through the subnet based on tempo. + for &netuid in subnets.iter() { + // Pass on subnets that have not reached their tempo. + if Self::should_run_epoch(netuid, current_block) { + if let Err(e) = Self::reveal_crv3_commits(netuid) { + log::warn!( + "Failed to reveal commits for subnet {} due to error: {:?}", + netuid, + e + ); + }; + + // Restart counters. 
+ BlocksSinceLastStep::::insert(netuid, 0); + LastMechansimStepBlock::::insert(netuid, current_block); + + // Get and drain the subnet pending emission. + let pending_alpha: u64 = PendingEmission::::get(netuid); + PendingEmission::::insert(netuid, 0); + + // Get and drain the subnet pending root divs. + let pending_tao: u64 = PendingRootDivs::::get(netuid); + PendingRootDivs::::insert(netuid, 0); + + // Get this amount as alpha that was swapped for pending root divs. + let pending_swapped: u64 = PendingAlphaSwapped::::get(netuid); + PendingAlphaSwapped::::insert(netuid, 0); + + // Get owner cut and drain. + let owner_cut: u64 = PendingOwnerCut::::get(netuid); + PendingOwnerCut::::insert(netuid, 0); + + // Drain pending root divs, alpha emission, and owner cut. + Self::drain_pending_emission( + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut, + ); + } else { + // Increment + BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); + } + } } pub fn drain_pending_emission( From abc3603752d914c337cefaa37936602883e03409 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 24 Mar 2025 09:42:10 +0800 Subject: [PATCH 065/121] rename the storage --- pallets/subtensor/src/benchmarks.rs | 2 +- .../subtensor/src/coinbase/run_coinbase.rs | 5 +---- pallets/subtensor/src/lib.rs | 2 +- pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/macros/errors.rs | 4 ++-- pallets/subtensor/src/macros/events.rs | 4 ++-- .../migrate_set_last_emission_block_number.rs | 2 +- pallets/subtensor/src/subnets/subnet.rs | 14 ++++++------- pallets/subtensor/src/tests/migration.rs | 4 ++-- pallets/subtensor/src/tests/mock.rs | 4 ++-- pallets/subtensor/src/tests/subnet.rs | 20 +++++++++---------- 11 files changed, 30 insertions(+), 33 deletions(-) diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index de8bd251f2..1e0339268f 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ 
b/pallets/subtensor/src/benchmarks.rs @@ -681,7 +681,7 @@ benchmark_start_call { assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); assert_eq!(SubnetOwner::::get(netuid), coldkey.clone()); - assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); let current_block: u64 = Subtensor::::get_current_block_as_u64(); let duration = ::DurationOfStartCall::get(); let block: BlockNumberFor = (current_block + duration).try_into().ok().expect(""); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index f5292281db..c33b0897a7 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -41,7 +41,7 @@ impl Pallet { let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) - .filter(|netuid| LastEmissionBlockNumber::::get(*netuid).is_some()) + .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) .collect(); log::debug!("All subnet netuids: {:?}", subnets); @@ -245,9 +245,6 @@ impl Pallet { pending_swapped, owner_cut, ); - - // Set last emission block - LastEmissionBlockNumber::::insert(netuid, Self::get_current_block_as_u64()) } else { // Increment BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 3df312f92b..604b68ec3b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1127,7 +1127,7 @@ pub mod pallet { /// ============================ /// --- MAP ( netuid ) --> block number of last emission #[pallet::storage] - pub type LastEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; + pub type FirstEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; /// --- MAP ( netuid ) --> subnet mechanism #[pallet::storage] pub type 
SubnetMechanism = diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 5b9eb14e1d..6b12b529cf 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1920,7 +1920,7 @@ mod dispatches { /// Emits a `CallInitiated` event on success. #[pallet::call_index(92)] #[pallet::weight(( - Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), + Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(6, 1)), DispatchClass::Operational, Pays::Yes ))] diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index f0e40d4207..ef965bf169 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -195,8 +195,8 @@ mod errors { ActivityCutoffTooLow, /// Call is disabled CallDisabled, - /// LastEmissionBlockNumber is already set. - LastEmissionBlockNumberAlreadySet, + /// FirstEmissionBlockNumber is already set. + FirstEmissionBlockNumberAlreadySet, /// need wait for more blocks to accept the start call extrinsic. NeedWaitingMoreBlocksToStarCall, /// Not enough AlphaOut on the subnet to recycle diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 74a32932b4..04a2093abf 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -275,12 +275,12 @@ mod events { /// Parameters: /// (netuid, new_hotkey) SubnetOwnerHotkeySet(u16, T::AccountId), - /// LastEmissionBlockNumber is set via start call extrinsic + /// FirstEmissionBlockNumber is set via start call extrinsic /// /// Parameters: /// netuid /// block number - LastEmissionBlockNumberSet(u16, u64), + FirstEmissionBlockNumberSet(u16, u64), /// Alpha has been recycled, reducing AlphaOut on a subnet. 
/// diff --git a/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs index bc1f7b9abd..00e60a13cf 100644 --- a/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs +++ b/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs @@ -27,7 +27,7 @@ pub fn migrate_set_last_emission_block_number() -> Weight { let current_block_number = Pallet::::get_current_block_as_u64(); for netuid in netuids.iter() { if *netuid != 0 { - LastEmissionBlockNumber::::insert(netuid, current_block_number); + FirstEmissionBlockNumber::::insert(netuid, current_block_number); } } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index fc29703cc7..58f3f9c6a1 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -337,7 +337,7 @@ impl Pallet { /// /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. - /// * `Error::::LastEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. + /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. 
/// /// # Returns /// @@ -349,8 +349,8 @@ impl Pallet { ); Self::ensure_subnet_owner(origin, netuid)?; ensure!( - LastEmissionBlockNumber::::get(netuid).is_none(), - Error::::LastEmissionBlockNumberAlreadySet + FirstEmissionBlockNumber::::get(netuid).is_none(), + Error::::FirstEmissionBlockNumberAlreadySet ); let registration_block_number = NetworkRegisteredAt::::get(netuid); @@ -362,15 +362,15 @@ impl Pallet { Error::::NeedWaitingMoreBlocksToStarCall ); - LastEmissionBlockNumber::::insert(netuid, current_block_number); - Self::deposit_event(Event::LastEmissionBlockNumberSet( + FirstEmissionBlockNumber::::insert(netuid, current_block_number + 1); + Self::deposit_event(Event::FirstEmissionBlockNumberSet( netuid, - current_block_number, + current_block_number + 1, )); Ok(()) } pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { - LastEmissionBlockNumber::::get(netuid).is_some() + FirstEmissionBlockNumber::::get(netuid).is_some() } } diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 549bc329dd..3864657432 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -432,9 +432,9 @@ fn test_migrate_set_last_emission_block_number() { let expected_weight: Weight = ::DbWeight::get().reads(3) + ::DbWeight::get().writes(netuids.len() as u64); assert_eq!(weight, expected_weight); - assert_eq!(LastEmissionBlockNumber::::get(0), None); + assert_eq!(FirstEmissionBlockNumber::::get(0), None); for netuid in netuids.iter() { - assert_eq!(LastEmissionBlockNumber::::get(netuid), Some(block_number)); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), Some(block_number)); } }); } diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 525ab2b58d..04d6888595 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -664,7 +664,7 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { 
SubtensorModule::init_new_network(netuid, tempo); SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); - LastEmissionBlockNumber::::insert(netuid, 0); + FirstEmissionBlockNumber::::insert(netuid, 0); } #[allow(dead_code)] @@ -686,7 +686,7 @@ pub fn add_dynamic_network(hotkey: &U256, coldkey: &U256) -> u16 { )); NetworkRegistrationAllowed::::insert(netuid, true); NetworkPowRegistrationAllowed::::insert(netuid, true); - LastEmissionBlockNumber::::insert(netuid, 0); + FirstEmissionBlockNumber::::insert(netuid, 0); netuid } diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index 978c0509dd..2378062b55 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -19,7 +19,7 @@ fn test_do_start_call_ok() { //add network SubtensorModule::set_burn(netuid, burn_cost); add_network_without_emission_block(netuid, tempo, 0); - assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); // Give it some $$$ in his coldkey balance SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); @@ -42,8 +42,8 @@ fn test_do_start_call_ok() { )); assert_eq!( - LastEmissionBlockNumber::::get(netuid), - Some(block_number) + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) ); }); } @@ -143,7 +143,7 @@ fn test_do_start_call_fail_for_set_again() { //add network SubtensorModule::set_burn(netuid, burn_cost); add_network_without_emission_block(netuid, tempo, 0); - assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); // Give it some $$$ in his coldkey balance SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); @@ -170,7 +170,7 @@ fn test_do_start_call_fail_for_set_again() { <::RuntimeOrigin>::signed(coldkey_account_id), netuid ), - 
Error::::LastEmissionBlockNumberAlreadySet + Error::::FirstEmissionBlockNumberAlreadySet ); }); } @@ -186,7 +186,7 @@ fn test_do_start_call_ok_with_updated_block_number_after_coinbase() { //add network SubtensorModule::set_burn(netuid, burn_cost); add_network_without_emission_block(netuid, tempo, 0); - assert_eq!(LastEmissionBlockNumber::::get(netuid), None); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); // Give it some $$$ in his coldkey balance SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); @@ -209,14 +209,14 @@ fn test_do_start_call_ok_with_updated_block_number_after_coinbase() { )); assert_eq!( - LastEmissionBlockNumber::::get(netuid), - Some(block_number) + FirstEmissionBlockNumber::::get(netuid), + Some(block_number + 1) ); step_block(tempo); - match LastEmissionBlockNumber::::get(netuid) { + match FirstEmissionBlockNumber::::get(netuid) { Some(new_emission_block_number) => assert!(new_emission_block_number > block_number), - None => assert!(LastEmissionBlockNumber::::get(netuid).is_some()), + None => assert!(FirstEmissionBlockNumber::::get(netuid).is_some()), } }); } From 9d8877f65a7868c63a6a42af84c9ca911f208dd0 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 24 Mar 2025 09:54:00 +0800 Subject: [PATCH 066/121] cargo fix --- pallets/subtensor/src/subnets/subnet.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 58f3f9c6a1..310c1d5fde 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -361,11 +361,12 @@ impl Pallet { >= registration_block_number.saturating_add(T::DurationOfStartCall::get()), Error::::NeedWaitingMoreBlocksToStarCall ); + let next_block_number = current_block_number.saturating_add(1); - FirstEmissionBlockNumber::::insert(netuid, current_block_number + 1); + FirstEmissionBlockNumber::::insert(netuid, next_block_number); 
Self::deposit_event(Event::FirstEmissionBlockNumberSet( netuid, - current_block_number + 1, + next_block_number, )); Ok(()) } From b8c8da75a4c4eb96491cfff421dab16f0f28b449 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 24 Mar 2025 20:02:38 +0800 Subject: [PATCH 067/121] commit Cargo.lock --- pallets/subtensor/src/benchmarks.rs | 2 +- .../subtensor/src/coinbase/run_coinbase.rs | 2 +- pallets/subtensor/src/lib.rs | 2 +- pallets/subtensor/src/macros/config.rs | 2 +- pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/macros/hooks.rs | 2 +- .../migrate_set_last_emission_block_number.rs | 53 ------------------- pallets/subtensor/src/migrations/mod.rs | 2 +- pallets/subtensor/src/tests/migration.rs | 4 +- pallets/subtensor/src/tests/mock.rs | 2 +- 10 files changed, 10 insertions(+), 63 deletions(-) delete mode 100644 pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 1e0339268f..8d4457b0c9 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -684,7 +684,7 @@ benchmark_start_call { assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); let current_block: u64 = Subtensor::::get_current_block_as_u64(); let duration = ::DurationOfStartCall::get(); - let block: BlockNumberFor = (current_block + duration).try_into().ok().expect(""); + let block: BlockNumberFor = (current_block + duration).try_into().ok().expect("can't convert to block number"); frame_system::Pallet::::set_block_number(block); }: start_call(RawOrigin::Signed(coldkey), netuid) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index c33b0897a7..7a29d4959a 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -37,7 +37,7 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); 
log::debug!("Current block: {:?}", current_block); - // --- 1. Get all netuids (filter out root and new subnet) + // --- 1. Get all netuids (filter out root and new subnet without first emission block) let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 604b68ec3b..1ec9cadb0a 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1125,7 +1125,7 @@ pub mod pallet { /// ============================ /// ==== Subnet Parameters ===== /// ============================ - /// --- MAP ( netuid ) --> block number of last emission + /// --- MAP ( netuid ) --> block number of first emission #[pallet::storage] pub type FirstEmissionBlockNumber = StorageMap<_, Identity, u16, u64, OptionQuery>; /// --- MAP ( netuid ) --> subnet mechanism diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 889c7e5c5f..cf4d97b65b 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -210,7 +210,7 @@ mod config { /// Initial EMA price halving period #[pallet::constant] type InitialEmaPriceHalvingPeriod: Get; - /// Block number for a new subnet accept the start call extrinsic. + /// Block number after a new subnet accept the start call extrinsic. #[pallet::constant] type DurationOfStartCall: Get; } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 6b12b529cf..9158073e17 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1917,7 +1917,7 @@ mod dispatches { /// * `netuid` - The unique identifier of the subnet on which the call is being initiated. /// /// # Events - /// Emits a `CallInitiated` event on success. + /// Emits a `FirstEmissionBlockNumberSet` event on success. 
#[pallet::call_index(92)] #[pallet::weight(( Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(6, 1)), diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 68cd2a7623..3203ae312c 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -83,7 +83,7 @@ mod hooks { // Remove unused maps entries .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()) // Set last emission block number for all existed subnets before start call feature applied - .saturating_add(migrations::migrate_set_last_emission_block_number::migrate_set_last_emission_block_number::()); + .saturating_add(migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs deleted file mode 100644 index 00e60a13cf..0000000000 --- a/pallets/subtensor/src/migrations/migrate_set_last_emission_block_number.rs +++ /dev/null @@ -1,53 +0,0 @@ -use super::*; -use crate::HasMigrationRun; -use frame_support::{traits::Get, weights::Weight}; -use scale_info::prelude::string::String; - -pub fn migrate_set_last_emission_block_number() -> Weight { - let migration_name = b"migrate_set_last_emission_block_number".to_vec(); - - let mut weight = T::DbWeight::get().reads(1); - if HasMigrationRun::::get(&migration_name) { - log::info!( - "Migration '{:?}' has already run. 
Skipping.", - String::from_utf8_lossy(&migration_name) - ); - return weight; - } - - log::info!( - "Running migration '{:?}'", - String::from_utf8_lossy(&migration_name) - ); - - // ------------------------------ - // Step 1: Set the last emission block for all subnets except root - // ------------------------------ - let netuids = Pallet::::get_all_subnet_netuids(); - let current_block_number = Pallet::::get_current_block_as_u64(); - for netuid in netuids.iter() { - if *netuid != 0 { - FirstEmissionBlockNumber::::insert(netuid, current_block_number); - } - } - - // ------------------------------ - // Step 2: Mark Migration as Completed - // ------------------------------ - - HasMigrationRun::::insert(&migration_name, true); - weight = weight.saturating_add(T::DbWeight::get().reads(2)); - - if netuids.is_empty() { - weight = weight.saturating_add(T::DbWeight::get().writes(1_u64)); - } else { - weight = weight.saturating_add(T::DbWeight::get().writes(netuids.len() as u64)); - } - - log::info!( - "Migration '{:?}' completed successfully.", - String::from_utf8_lossy(&migration_name) - ); - - weight -} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index cada8a9997..19e057e3ec 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -11,7 +11,7 @@ pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_remove_stake_map; pub mod migrate_remove_unused_maps_and_values; -pub mod migrate_set_last_emission_block_number; +pub mod migrate_set_first_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; pub mod migrate_stake_threshold; diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 3864657432..2fac8c5db5 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -419,7 +419,7 @@ fn test_migrate_subnet_volume() { } #[test] -fn 
test_migrate_set_last_emission_block_number() { +fn test_migrate_set_first_emission_block_number() { new_test_ext(1).execute_with(|| { let netuids: [u16; 3] = [1, 2, 3]; let block_number = 100; @@ -427,7 +427,7 @@ fn test_migrate_set_last_emission_block_number() { add_network(*netuid, 1, 0); } run_to_block(block_number); - let weight = crate::migrations::migrate_set_last_emission_block_number::migrate_set_last_emission_block_number::(); + let weight = crate::migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::(); let expected_weight: Weight = ::DbWeight::get().reads(3) + ::DbWeight::get().writes(netuids.len() as u64); assert_eq!(weight, expected_weight); diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 04d6888595..ea5e3d4492 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -664,7 +664,7 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::init_new_network(netuid, tempo); SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); - FirstEmissionBlockNumber::::insert(netuid, 0); + FirstEmissionBlockNumber::::insert(netuid, 1); } #[allow(dead_code)] From 810f94c3b3e8989de92501fdb1274c9122c11421 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 24 Mar 2025 20:12:33 +0800 Subject: [PATCH 068/121] update migration name --- ...migrate_set_first_emission_block_number.rs | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs diff --git a/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs new file mode 100644 index 0000000000..04ad306218 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_set_first_emission_block_number.rs @@ -0,0 
+1,53 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_set_first_emission_block_number() -> Weight { + let migration_name = b"migrate_set_first_emission_block_number".to_vec(); + + let mut weight = T::DbWeight::get().reads(1); + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{:?}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Set the first emission block for all subnets except root + // ------------------------------ + let netuids = Pallet::::get_all_subnet_netuids(); + let current_block_number = Pallet::::get_current_block_as_u64(); + for netuid in netuids.iter() { + if *netuid != 0 { + FirstEmissionBlockNumber::::insert(netuid, current_block_number); + } + } + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().reads(2)); + + if netuids.is_empty() { + weight = weight.saturating_add(T::DbWeight::get().writes(1_u64)); + } else { + weight = weight.saturating_add(T::DbWeight::get().writes(netuids.len() as u64)); + } + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} From 5fb588a946530eb20d303d5d5030fb9d8350c516 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 24 Mar 2025 20:15:08 +0800 Subject: [PATCH 069/121] update coinbase test case --- pallets/subtensor/src/tests/subnet.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index 2378062b55..1daf19159f 100644 --- 
a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -176,7 +176,7 @@ fn test_do_start_call_fail_for_set_again() { } #[test] -fn test_do_start_call_ok_with_updated_block_number_after_coinbase() { +fn test_do_start_call_ok_with_same_block_number_after_coinbase() { new_test_ext(0).execute_with(|| { let netuid: u16 = 1; let tempo: u16 = 13; @@ -215,7 +215,9 @@ fn test_do_start_call_ok_with_updated_block_number_after_coinbase() { step_block(tempo); match FirstEmissionBlockNumber::::get(netuid) { - Some(new_emission_block_number) => assert!(new_emission_block_number > block_number), + Some(new_emission_block_number) => { + assert_eq!(new_emission_block_number, block_number + 1) + } None => assert!(FirstEmissionBlockNumber::::get(netuid).is_some()), } }); From ad195fd55f9d898b7cb253931636b8ed15ae15fb Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Mon, 24 Mar 2025 15:36:27 +0100 Subject: [PATCH 070/121] doc: added comment in localnet Dockerfile about fastblocks default --- Dockerfile-localnet | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 0efaf5b47a..0de11cb866 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -62,4 +62,6 @@ ENV RUN_IN_DOCKER=1 EXPOSE 30334 30335 9944 9945 ENTRYPOINT ["/scripts/localnet.sh"] +# Fast blocks defaults to True, you can disable it by passing False to the docker command, e.g.: +# docker run ghcr.io/opentensor/subtensor-localnet False CMD ["True"] From 1428d2400d67f19f2b7f359a380d79afa0dccd69 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 24 Mar 2025 10:55:22 -0700 Subject: [PATCH 071/121] remove `TotalHotkeyAlpha` balances that drop to 0 --- pallets/subtensor/src/staking/recycle_alpha.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 109d26c850..cb5e740e84 
100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -42,7 +42,14 @@ impl Pallet { Error::::InsufficientLiquidity ); - TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| *v = v.saturating_sub(amount)); + if TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| { + *v = v.saturating_sub(amount); + *v + }) == 0 + { + TotalHotkeyAlpha::::remove(&hotkey, netuid); + } + SubnetAlphaOut::::mutate(netuid, |total| { *total = total.saturating_sub(amount); }); @@ -92,7 +99,13 @@ impl Pallet { Error::::InsufficientLiquidity ); - TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| *v = v.saturating_sub(amount)); + if TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| { + *v = v.saturating_sub(amount); + *v + }) == 0 + { + TotalHotkeyAlpha::::remove(&hotkey, netuid); + } // Deposit event Self::deposit_event(Event::AlphaBurned(coldkey, hotkey, amount, netuid)); From c862c4685a097bfaf1a5bd12e3fdac2eab661508 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 24 Mar 2025 11:05:06 -0700 Subject: [PATCH 072/121] add migration to remove existing zero value items --- pallets/subtensor/src/macros/hooks.rs | 4 +- .../migrate_remove_zero_total_hotkey_alpha.rs | 61 +++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 3203ae312c..834e6c86bb 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -83,7 +83,9 @@ mod hooks { // Remove unused maps entries .saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::()) // Set last emission block number for all existed subnets before start call feature applied - 
.saturating_add(migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::()); + .saturating_add(migrations::migrate_set_first_emission_block_number::migrate_set_first_emission_block_number::()) + // Remove all zero value entries in TotalHotkeyAlpha + .saturating_add(migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs new file mode 100644 index 0000000000..c6ca619b48 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs @@ -0,0 +1,61 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; +use log; + + +pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { + let migration_name = b"migrate_remove_zero_total_hotkey_alpha".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // ------------------------------ + // Step 0: Check if already run + // ------------------------------ + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Remove any zero entries in TotalHotkeyAlpha + // ------------------------------ + + let mut removed_entries_count = 0u64; + + // For each (hotkey, netuid, alpha) entry, remove if alpha == 0 + for (hotkey, netuid, alpha) in TotalHotkeyAlpha::::iter() { + if alpha == 0 { + TotalHotkeyAlpha::::remove(&hotkey, netuid); + removed_entries_count += 1; + } + } + + weight = weight.saturating_add(T::DbWeight::get().reads(removed_entries_count)); + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count)); + + log::info!( + "Removed {} zero entries from TotalHotkeyAlpha.", + removed_entries_count + ); + + // ------------------------------ + // Step 2: Mark Migration as Completed + // ------------------------------ + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} \ No newline at end of file diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 19e057e3ec..c9108b4a5b 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -20,3 +20,4 @@ pub mod migrate_to_v1_separate_emission; pub mod migrate_to_v2_fixed_total_stake; pub mod migrate_total_issuance; pub mod migrate_transfer_ownership_to_foundation; +pub mod migrate_remove_zero_total_hotkey_alpha; From b663a4fdddc1fc533eabf585ced948f346ef0837 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 24 Mar 2025 11:24:56 -0700 Subject: [PATCH 073/121] add migration test --- pallets/subtensor/src/tests/migration.rs | 42 ++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git 
a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 2fac8c5db5..0ecbfb927d 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -438,3 +438,45 @@ fn test_migrate_set_first_emission_block_number() { } }); } + +#[test] +fn test_migrate_remove_zero_total_hotkey_alpha() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_remove_zero_total_hotkey_alpha"; + let netuid = 1u16; + + let hotkey_zero = U256::from(100u64); + let hotkey_nonzero = U256::from(101u64); + + // Insert one zero-alpha entry and one non-zero entry + TotalHotkeyAlpha::::insert(hotkey_zero, netuid, 0u64); + TotalHotkeyAlpha::::insert(hotkey_nonzero, netuid, 123u64); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_zero, netuid), 0u64); + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet." + ); + + let weight = crate::migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::(); + + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should be marked as run." + ); + + assert!( + !TotalHotkeyAlpha::::contains_key(hotkey_zero, netuid), + "Zero-alpha entry should have been removed." + ); + + assert_eq!(TotalHotkeyAlpha::::get(hotkey_nonzero, netuid), 123u64); + + assert!( + !weight.is_zero(), + "Migration weight should be non-zero." 
+ ); + }); +} \ No newline at end of file From 280079d92ad0f5599eef07cf602c315bcd26d52f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 24 Mar 2025 11:25:53 -0700 Subject: [PATCH 074/121] fmt --- .../src/migrations/migrate_remove_zero_total_hotkey_alpha.rs | 5 ++--- pallets/subtensor/src/migrations/mod.rs | 2 +- pallets/subtensor/src/tests/migration.rs | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs index c6ca619b48..5e88c0efcd 100644 --- a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs +++ b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs @@ -1,8 +1,7 @@ use super::*; use frame_support::{traits::Get, weights::Weight}; -use scale_info::prelude::string::String; use log; - +use scale_info::prelude::string::String; pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { let migration_name = b"migrate_remove_zero_total_hotkey_alpha".to_vec(); @@ -58,4 +57,4 @@ pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { ); weight -} \ No newline at end of file +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index c9108b4a5b..b342d54979 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -11,6 +11,7 @@ pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_remove_stake_map; pub mod migrate_remove_unused_maps_and_values; +pub mod migrate_remove_zero_total_hotkey_alpha; pub mod migrate_set_first_emission_block_number; pub mod migrate_set_min_burn; pub mod migrate_set_min_difficulty; @@ -20,4 +21,3 @@ pub mod migrate_to_v1_separate_emission; pub mod migrate_to_v2_fixed_total_stake; pub mod migrate_total_issuance; pub mod migrate_transfer_ownership_to_foundation; -pub mod 
migrate_remove_zero_total_hotkey_alpha; diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 0ecbfb927d..0628127413 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -461,7 +461,7 @@ fn test_migrate_remove_zero_total_hotkey_alpha() { ); let weight = crate::migrations::migrate_remove_zero_total_hotkey_alpha::migrate_remove_zero_total_hotkey_alpha::(); - + assert!( HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), "Migration should be marked as run." @@ -479,4 +479,4 @@ fn test_migrate_remove_zero_total_hotkey_alpha() { "Migration weight should be non-zero." ); }); -} \ No newline at end of file +} From 554a7e8614c041fa9caf9693f8e35726c3d9550f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 24 Mar 2025 11:30:07 -0700 Subject: [PATCH 075/121] clippy --- .../src/migrations/migrate_remove_zero_total_hotkey_alpha.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs index 5e88c0efcd..3b45615bf4 100644 --- a/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs +++ b/pallets/subtensor/src/migrations/migrate_remove_zero_total_hotkey_alpha.rs @@ -33,7 +33,7 @@ pub fn migrate_remove_zero_total_hotkey_alpha() -> Weight { for (hotkey, netuid, alpha) in TotalHotkeyAlpha::::iter() { if alpha == 0 { TotalHotkeyAlpha::::remove(&hotkey, netuid); - removed_entries_count += 1; + removed_entries_count = removed_entries_count.saturating_add(1); } } From ef5d36f7ebc6c899b34cc60393cabe4cd00803dd Mon Sep 17 00:00:00 2001 From: tb-team-dev-2 Date: Mon, 24 Mar 2025 23:49:12 +0000 Subject: [PATCH 076/121] feat: add actual_fee to StakeAdded and StakeRemoved events --- pallets/subtensor/src/macros/events.rs | 4 ++-- 
pallets/subtensor/src/staking/stake_utils.rs | 12 ++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 834aa901fa..2bccf8b8ac 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -14,9 +14,9 @@ mod events { /// a network is removed. NetworkRemoved(u16), /// stake has been transferred from the a coldkey account onto the hotkey staking account. - StakeAdded(T::AccountId, T::AccountId, u64, u64, u16), + StakeAdded(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been removed from the hotkey staking account onto the coldkey account. - StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16), + StakeRemoved(T::AccountId, T::AccountId, u64, u64, u16, u64), /// stake has been moved from origin (hotkey, subnet ID) to destination (hotkey, subnet ID) of this amount (in TAO). StakeMoved(T::AccountId, T::AccountId, u16, T::AccountId, u16, u64), /// a caller successfully sets their weights on a subnetwork. diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index 894a5a9132..de99ffa106 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -794,14 +794,16 @@ impl Pallet { tao_unstaked, actual_alpha_decrease, netuid, + actual_fee )); log::debug!( - "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_unstaked, actual_alpha_decrease, - netuid + netuid, + actual_fee ); // Step 6: Return the amount of TAO unstaked. 
@@ -857,14 +859,16 @@ impl Pallet { tao_staked, actual_alpha, netuid, + actual_fee )); log::debug!( - "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?} )", + "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", coldkey.clone(), hotkey.clone(), tao_staked, actual_alpha, - netuid + netuid, + actual_fee ); // Step 7: Return the amount of alpha staked From 9d9893a9e7d3bb122d1e16aef7a5e7217df8a769 Mon Sep 17 00:00:00 2001 From: tb-team-dev-2 Date: Mon, 24 Mar 2025 23:54:47 +0000 Subject: [PATCH 077/121] feat: add actual_fee to StakeAdded and StakeRemoved events pt 2 --- pallets/subtensor/src/staking/stake_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index de99ffa106..105e230247 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -794,7 +794,7 @@ impl Pallet { tao_unstaked, actual_alpha_decrease, netuid, - actual_fee + actual_fee, )); log::debug!( "StakeRemoved( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", @@ -859,7 +859,7 @@ impl Pallet { tao_staked, actual_alpha, netuid, - actual_fee + actual_fee, )); log::debug!( "StakeAdded( coldkey: {:?}, hotkey:{:?}, tao: {:?}, alpha:{:?}, netuid: {:?}, fee: {:?} )", From 60b355109555a7a8fb2a6395771186b65ce52738 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Mon, 24 Mar 2025 22:48:11 -0400 Subject: [PATCH 078/121] shorten start call duration for fast blocks --- runtime/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 22659c130a..c766000b99 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1044,7 +1044,11 @@ parameter_types! 
{ pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const SubtensorInitialTaoWeight: u64 = 971_718_665_099_567_868; // 0.05267697438728329% tao weight. pub const InitialEmaPriceHalvingPeriod: u64 = 201_600_u64; // 4 weeks - pub const DurationOfStartCall: u64 = 7 * 24 * 60 * 60 / 12; // 7 days + pub const DurationOfStartCall: u64 = if cfg!(feature = "fast-blocks") { + 10 // Only 10 blocks for fast blocks + } else { + 7 * 24 * 60 * 60 / 12 // 7 days + }; } impl pallet_subtensor::Config for Runtime { From 31a7f6277b57d7e67736e4f35732511274fbb7f7 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 17:41:56 -0400 Subject: [PATCH 079/121] use prop_alpha_dividends --- pallets/subtensor/src/coinbase/run_coinbase.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 346064fd8c..e3acf92429 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -387,7 +387,7 @@ impl Pallet { // Compute proportional alpha divs using the pending alpha and total alpha divs from the epoch. let mut prop_alpha_dividends: BTreeMap = BTreeMap::new(); - for (hotkey, alpha_divs) in alpha_dividends.iter() { + for (hotkey, alpha_divs) in alpha_dividends { // Alpha proportion. let alpha_share: I96F32 = alpha_divs.checked_div(total_alpha_divs).unwrap_or(zero); log::debug!("hotkey: {:?}, alpha_share: {:?}", hotkey, alpha_share); @@ -447,7 +447,7 @@ impl Pallet { // Distribute alpha divs. 
let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); - for (hotkey, mut alpha_divs) in alpha_dividends { + for (hotkey, mut alpha_divs) in prop_alpha_dividends { // Get take prop let alpha_take: I96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(alpha_divs); From eeba9418104dae5860a01369e5b4ad8423689252 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 18:41:25 -0400 Subject: [PATCH 080/121] refactor coinbase for testability --- .../subtensor/src/coinbase/run_coinbase.rs | 98 ++++++++++++------- pallets/subtensor/src/tests/coinbase.rs | 7 +- 2 files changed, 66 insertions(+), 39 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index e3acf92429..3bef3df5af 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -251,22 +251,11 @@ impl Pallet { } } - pub fn drain_pending_emission( + pub fn calculate_dividends_and_incentives( netuid: u16, pending_alpha: u64, - pending_tao: u64, pending_swapped: u64, - owner_cut: u64, - ) { - log::debug!( - "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut - ); - + ) -> (BTreeMap, BTreeMap) { // Run the epoch. let hotkey_emission: Vec<(T::AccountId, u64, u64)> = Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); @@ -295,32 +284,17 @@ impl Pallet { log::debug!("incentives: {:?}", incentives); log::debug!("dividends: {:?}", dividends); - // Compute the pending validator alpha. - // This is the total alpha being injected, - // minus the the alpha for the miners, (50%) - // and minus the alpha swapped for TAO (pending_swapped). 
- let pending_validator_alpha: u64 = pending_alpha - .saturating_add(pending_swapped) - .saturating_div(2) - .saturating_sub(pending_swapped); - - Self::distribute_dividends_and_incentives( - netuid, - pending_tao, - pending_validator_alpha, - owner_cut, - incentives, - dividends, - ); + (incentives, dividends) } - pub fn distribute_dividends_and_incentives( + pub fn calculate_dividend_distribution( netuid: u16, - pending_tao: u64, pending_alpha: u64, - owner_cut: u64, - incentives: BTreeMap, + pending_tao: u64, dividends: BTreeMap, + ) -> ( + BTreeMap, + BTreeMap, ) { // Setup. let zero: I96F32 = asfloat!(0.0); @@ -403,6 +377,16 @@ impl Pallet { } log::debug!("prop_alpha_dividends: {:?}", prop_alpha_dividends); + (prop_alpha_dividends, tao_dividends) + } + + pub fn distribute_dividends_and_incentives( + netuid: u16, + owner_cut: u64, + incentives: BTreeMap, + alpha_dividends: BTreeMap, + tao_dividends: BTreeMap, + ) { // Distribute the owner cut. if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { @@ -447,7 +431,7 @@ impl Pallet { // Distribute alpha divs. let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); - for (hotkey, mut alpha_divs) in prop_alpha_dividends { + for (hotkey, mut alpha_divs) in alpha_dividends { // Get take prop let alpha_take: I96F32 = Self::get_hotkey_take_float(&hotkey).saturating_mul(alpha_divs); @@ -499,6 +483,50 @@ impl Pallet { } } + pub fn drain_pending_emission( + netuid: u16, + pending_alpha: u64, + pending_tao: u64, + pending_swapped: u64, + owner_cut: u64, + ) { + log::debug!( + "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut + ); + + // Compute the pending validator alpha. 
+ // This is the total alpha being injected, + // minus the the alpha for the miners, (50%) + // and minus the alpha swapped for TAO (pending_swapped). + let pending_validator_alpha: u64 = pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped); + + let (incentives, dividends) = + Self::calculate_dividends_and_incentives(netuid, pending_alpha, pending_swapped); + + let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( + netuid, + pending_validator_alpha, + pending_tao, + dividends, + ); + + Self::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + alpha_dividends, + tao_dividends, + ); + } + /// Returns the self contribution of a hotkey on a subnet. /// This is the portion of the hotkey's stake that is provided by itself, and not delegated to other hotkeys. pub fn get_self_contribution(hotkey: &T::AccountId, netuid: u16) -> u64 { diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 99f95f2a74..889d984f3c 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1457,7 +1457,6 @@ fn test_incentive_to_subnet_owner_is_burned() { let pending_alpha: u64 = 0; // None to valis let owner_cut: u64 = 0; let mut incentives: BTreeMap = BTreeMap::new(); - let mut dividends: BTreeMap = BTreeMap::new(); // Give incentive to other_hk incentives.insert(other_hk, 10_000_000); @@ -1475,11 +1474,10 @@ fn test_incentive_to_subnet_owner_is_burned() { // Distribute dividends and incentives SubtensorModule::distribute_dividends_and_incentives( netuid, - pending_tao, - pending_alpha, owner_cut, incentives, - dividends, + BTreeMap::new(), + BTreeMap::new(), ); // Verify stake after @@ -1490,3 +1488,4 @@ fn test_incentive_to_subnet_owner_is_burned() { assert!(other_stake_after > 0); }); } + From 7ed1430645f3e3134448580e5cbc0fdbc5887c80 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 
2025 18:52:57 -0400 Subject: [PATCH 081/121] rename --- pallets/subtensor/src/coinbase/run_coinbase.rs | 4 ++-- pallets/subtensor/src/tests/children.rs | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 3bef3df5af..5f8f1901fb 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -272,7 +272,7 @@ impl Pallet { .or_insert(incentive); // Accumulate dividends to parents. let div_tuples: Vec<(T::AccountId, u64)> = - Self::get_dividends_distribution(&hotkey, netuid, dividend); + Self::get_parent_child_dividends_distribution(&hotkey, netuid, dividend); // Accumulate dividends per hotkey. for (parent, parent_div) in div_tuples { dividends @@ -575,7 +575,7 @@ impl Pallet { /// # Returns /// * dividend_tuples: `Vec<(T::AccountId, u64)>` - Vector of (hotkey, divs) for each parent including self. /// - pub fn get_dividends_distribution( + pub fn get_parent_child_dividends_distribution( hotkey: &T::AccountId, netuid: u16, dividends: u64, diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 21ddd453f5..faf48a8366 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -3382,17 +3382,17 @@ fn test_dividend_distribution_with_children() { "C should have pending emission of 1/9 of total emission" ); - let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_c = SubtensorModule::get_dividends_distribution( + let dividends_c = 
SubtensorModule::get_parent_child_dividends_distribution( &hotkey_c, netuid, hardcoded_emission.saturating_to_num::(), @@ -3883,12 +3883,12 @@ fn test_dividend_distribution_with_children_same_coldkey_owner() { ); // Get the distribution of dividends including the Parent/Child relationship. - let dividends_a = SubtensorModule::get_dividends_distribution( + let dividends_a = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_a, netuid, hardcoded_emission.saturating_to_num::(), ); - let dividends_b = SubtensorModule::get_dividends_distribution( + let dividends_b = SubtensorModule::get_parent_child_dividends_distribution( &hotkey_b, netuid, hardcoded_emission.saturating_to_num::(), From 5c64e936fa7879c1ac91630c8424cb170a10ad0c Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 19:21:26 -0400 Subject: [PATCH 082/121] use stake_map as input instead of storage --- .../subtensor/src/coinbase/run_coinbase.rs | 85 ++++++++++++------- 1 file changed, 52 insertions(+), 33 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 5f8f1901fb..6a95922e26 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -288,9 +288,9 @@ impl Pallet { } pub fn calculate_dividend_distribution( - netuid: u16, pending_alpha: u64, pending_tao: u64, + stake_map: BTreeMap, dividends: BTreeMap, ) -> ( BTreeMap, @@ -306,37 +306,37 @@ impl Pallet { let mut root_dividends: BTreeMap = BTreeMap::new(); let mut alpha_dividends: BTreeMap = BTreeMap::new(); for (hotkey, dividend) in dividends { - // Get hotkey ALPHA on subnet. - let alpha_stake = asfloat!(Self::get_stake_for_hotkey_on_subnet(&hotkey, netuid)); - // Get hotkey TAO on root. - let root_stake: I96F32 = asfloat!(Self::get_stake_for_hotkey_on_subnet( - &hotkey, - Self::get_root_netuid() - )); - // Convert TAO to alpha with weight. 
- let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); - // Get total from root and local - let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); - // Compute root prop. - let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); - // Compute root dividends - let root_divs: I96F32 = dividend.saturating_mul(root_prop); - // Compute alpha dividends - let alpha_divs: I96F32 = dividend.saturating_sub(root_divs); - // Record the alpha dividends. - alpha_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(alpha_divs)) - .or_insert(alpha_divs); - // Accumulate total alpha divs. - total_alpha_divs = total_alpha_divs.saturating_add(alpha_divs); - // Record the root dividends. - root_dividends - .entry(hotkey.clone()) - .and_modify(|e| *e = e.saturating_add(root_divs)) - .or_insert(root_divs); - // Accumulate total root divs. - total_root_divs = total_root_divs.saturating_add(root_divs); + if let Some((alpha_stake_u64, root_stake_u64)) = stake_map.get(&hotkey) { + // Get hotkey ALPHA on subnet. + let alpha_stake: I96F32 = asfloat!(*alpha_stake_u64); + // Get hotkey TAO on root. + let root_stake: I96F32 = asfloat!(*root_stake_u64); + + // Convert TAO to alpha with weight. + let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); + // Get total from root and local + let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); + // Compute root prop. + let root_prop: I96F32 = root_alpha.checked_div(total_alpha).unwrap_or(zero); + // Compute root dividends + let root_divs: I96F32 = dividend.saturating_mul(root_prop); + // Compute alpha dividends + let alpha_divs: I96F32 = dividend.saturating_sub(root_divs); + // Record the alpha dividends. + alpha_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(alpha_divs)) + .or_insert(alpha_divs); + // Accumulate total alpha divs. 
+ total_alpha_divs = total_alpha_divs.saturating_add(alpha_divs); + // Record the root dividends. + root_dividends + .entry(hotkey.clone()) + .and_modify(|e| *e = e.saturating_add(root_divs)) + .or_insert(root_divs); + // Accumulate total root divs. + total_root_divs = total_root_divs.saturating_add(root_divs); + } } log::debug!("alpha_dividends: {:?}", alpha_dividends); log::debug!("root_dividends: {:?}", root_dividends); @@ -483,6 +483,22 @@ impl Pallet { } } + pub fn get_stake_map( + netuid: u16, + hotkeys: Vec<&T::AccountId>, + ) -> BTreeMap { + let mut stake_map: BTreeMap = BTreeMap::new(); + for hotkey in hotkeys { + // Get hotkey ALPHA on subnet. + let alpha_stake: u64 = Self::get_stake_for_hotkey_on_subnet(hotkey, netuid); + // Get hotkey TAO on root. + let root_stake: u64 = + Self::get_stake_for_hotkey_on_subnet(hotkey, Self::get_root_netuid()); + stake_map.insert(hotkey.clone(), (alpha_stake, root_stake)); + } + stake_map + } + pub fn drain_pending_emission( netuid: u16, pending_alpha: u64, @@ -511,10 +527,13 @@ impl Pallet { let (incentives, dividends) = Self::calculate_dividends_and_incentives(netuid, pending_alpha, pending_swapped); + let stake_map: BTreeMap = + Self::get_stake_map(netuid, dividends.keys().collect::>()); + let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( - netuid, pending_validator_alpha, pending_tao, + stake_map, dividends, ); From 9f43a9dd5db844750ba0a6e156e68b1125a32e89 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 19:37:42 -0400 Subject: [PATCH 083/121] extract tao_weight --- pallets/subtensor/src/coinbase/run_coinbase.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 6a95922e26..3705a6c366 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -290,6 +290,7 @@ impl Pallet { pub fn 
calculate_dividend_distribution( pending_alpha: u64, pending_tao: u64, + tao_weight: I96F32, stake_map: BTreeMap, dividends: BTreeMap, ) -> ( @@ -313,7 +314,7 @@ impl Pallet { let root_stake: I96F32 = asfloat!(*root_stake_u64); // Convert TAO to alpha with weight. - let root_alpha: I96F32 = root_stake.saturating_mul(Self::get_tao_weight()); + let root_alpha: I96F32 = root_stake.saturating_mul(tao_weight); // Get total from root and local let total_alpha: I96F32 = alpha_stake.saturating_add(root_alpha); // Compute root prop. @@ -530,9 +531,12 @@ impl Pallet { let stake_map: BTreeMap = Self::get_stake_map(netuid, dividends.keys().collect::>()); + let tao_weight = Self::get_tao_weight(); + let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( pending_validator_alpha, pending_tao, + tao_weight, stake_map, dividends, ); From c0d04c6bfac524c9ef56e3e29db3c6e6b450add6 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 19:38:32 -0400 Subject: [PATCH 084/121] test dividend distr sums near inputs --- pallets/subtensor/src/tests/coinbase.rs | 42 +++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 889d984f3c..fa1d7119e3 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1489,3 +1489,45 @@ fn test_incentive_to_subnet_owner_is_burned() { }); } +#[test] +fn test_calculate_dividend_distribution_totals() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. 
+ stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} From 76b52d8f363d3aba81fbf9ec5448340388030a47 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 20:39:09 -0400 Subject: [PATCH 085/121] extract functions from drain_pending --- .../subtensor/src/coinbase/run_coinbase.rs | 45 ++++++++++++++----- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 3705a6c366..fcc94e95db 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -500,22 +500,18 @@ impl Pallet { stake_map } - pub fn drain_pending_emission( + pub fn calculate_dividend_and_incentive_distribution( netuid: u16, pending_alpha: u64, pending_tao: u64, pending_swapped: u64, - owner_cut: u64, + ) -> ( + BTreeMap, + ( + BTreeMap, + BTreeMap, + ), ) { - log::debug!( - "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", - netuid, - pending_alpha, - pending_tao, - pending_swapped, - owner_cut - ); - // Compute the pending validator alpha. 
// This is the total alpha being injected, // minus the the alpha for the miners, (50%) @@ -541,6 +537,33 @@ impl Pallet { dividends, ); + (incentives, (alpha_dividends, tao_dividends)) + } + + pub fn drain_pending_emission( + netuid: u16, + pending_alpha: u64, + pending_tao: u64, + pending_swapped: u64, + owner_cut: u64, + ) { + log::debug!( + "Draining pending alpha emission for netuid {:?}, pending_alpha: {:?}, pending_tao: {:?}, pending_swapped: {:?}, owner_cut: {:?}", + netuid, + pending_alpha, + pending_tao, + pending_swapped, + owner_cut + ); + + let (incentives, (alpha_dividends, tao_dividends)) = + Self::calculate_dividend_and_incentive_distribution( + netuid, + pending_alpha, + pending_tao, + pending_swapped, + ); + Self::distribute_dividends_and_incentives( netuid, owner_cut, From a2e0a315bb6064dfb8769509a7203e120d335e4f Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 20:45:51 -0400 Subject: [PATCH 086/121] add more tests --- pallets/subtensor/src/tests/coinbase.rs | 130 ++++++++++++++++++++++++ 1 file changed, 130 insertions(+) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index fa1d7119e3..e87bfe39cf 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1531,3 +1531,133 @@ fn test_calculate_dividend_distribution_totals() { ); }); } + +#[test] +fn test_calculate_dividend_distribution_total_only_tao() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 0; + let pending_tao: u64 = 837_120_949_872; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. 
+ stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. + let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_no_tao_weight() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; // If tao weight is 0, then only alpha dividends should be input. + let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + +#[test] +fn test_calculate_dividend_distribution_total_only_alpha() { + new_test_ext(1).execute_with(|| { + let mut stake_map: BTreeMap = BTreeMap::new(); + let mut dividends: BTreeMap = BTreeMap::new(); + + let pending_validator_alpha: u64 = 183_123_567_452; + let pending_tao: u64 = 0; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.18); // 18% + + let hotkeys = [U256::from(0), U256::from(1)]; + + // Stake map and dividends shouldn't matter for this test. + stake_map.insert(hotkeys[0], (4_859_302, 2_342_352)); + stake_map.insert(hotkeys[1], (23_423, 859_273)); + dividends.insert(hotkeys[0], 77_783_738.into()); + dividends.insert(hotkeys[1], 19_283_940.into()); + + let (alpha_dividends, tao_dividends) = SubtensorModule::calculate_dividend_distribution( + pending_validator_alpha, + pending_tao, + tao_weight, + stake_map, + dividends, + ); + + // Verify the total of each dividends type is close to the inputs. 
+ let total_alpha_dividends = alpha_dividends.values().sum::(); + let total_tao_dividends = tao_dividends.values().sum::(); + + assert_abs_diff_eq!( + total_alpha_dividends.saturating_to_num::(), + pending_validator_alpha, + epsilon = 1_000 + ); + assert_abs_diff_eq!( + total_tao_dividends.saturating_to_num::(), + pending_tao, + epsilon = 1_000 + ); + }); +} + From 28a17c352238fd267808e0c04d1b99d2aac64a45 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 23:11:06 -0400 Subject: [PATCH 087/121] add epoch test for single reg --- pallets/subtensor/src/tests/epoch.rs | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 38b104ac2b..1f5b817156 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -8,6 +8,7 @@ use super::mock::*; use crate::epoch::math::safe_exp; use crate::*; +use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_ok}; // use frame_system::Config; @@ -2828,6 +2829,41 @@ fn test_can_set_self_weight_as_subnet_owner() { }); } +#[test] +fn test_epoch_outputs_single_staker_registered_no_weights() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let high_tempo: u16 = u16::MAX - 1; // Don't run automatically. 
+ add_network(netuid, high_tempo, 0); + + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha: u64 = 1_000_000_000; + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, pending_alpha); + + let sum_incentives: u64 = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum(); + let sum_dividends: u64 = hotkey_emission + .iter() + .map(|(_, _, dividend)| dividend) + .sum(); + + assert_abs_diff_eq!( + sum_incentives.saturating_add(sum_dividends), + pending_alpha, + epsilon = 1_000 + ); + }); +} + // Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, // of which the first 64 are validators, the graph is split into a major and minor set, each setting // specific weight on itself and the complement on the other. From 834b6d30908b5f324830bc36b37156275b32a257 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Fri, 21 Mar 2025 23:12:58 -0400 Subject: [PATCH 088/121] add test for calc divs and incentives fn --- pallets/subtensor/src/tests/coinbase.rs | 40 +++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index e87bfe39cf..dcb5426ece 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1661,3 +1661,43 @@ fn test_calculate_dividend_distribution_total_only_alpha() { }); } +#[test] +fn test_calculate_dividends_and_incentives_only_alpha() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &coldkey, + netuid, + 1, + ); + + let pending_alpha = 123_456_789; + let pending_swapped = 0; // Only alpha output. + + let (incentives, dividends) = SubtensorModule::calculate_dividends_and_incentives( + netuid, + pending_alpha, + pending_swapped, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha + ); + }); +} From 4e33275a400c8cd4ee4c744a41a68c626a622378 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Sat, 22 Mar 2025 02:18:22 -0400 Subject: [PATCH 089/121] extract calls to drain --- .../subtensor/src/coinbase/run_coinbase.rs | 47 ++++++++++--------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index fcc94e95db..fc950517c8 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -253,14 +253,8 @@ impl Pallet { pub fn calculate_dividends_and_incentives( netuid: u16, - pending_alpha: u64, - pending_swapped: u64, + hotkey_emission: Vec<(T::AccountId, u64, u64)>, ) -> (BTreeMap, BTreeMap) { - // Run the epoch. - let hotkey_emission: Vec<(T::AccountId, u64, u64)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); - log::debug!("hotkey_emission: {:?}", hotkey_emission); - // Accumulate emission of dividends and incentive per hotkey. 
let mut incentives: BTreeMap = BTreeMap::new(); let mut dividends: BTreeMap = BTreeMap::new(); @@ -502,9 +496,10 @@ impl Pallet { pub fn calculate_dividend_and_incentive_distribution( netuid: u16, - pending_alpha: u64, pending_tao: u64, - pending_swapped: u64, + pending_validator_alpha: u64, + hotkey_emission: Vec<(T::AccountId, u64, u64)>, + tao_weight: I96F32, ) -> ( BTreeMap, ( @@ -512,23 +507,12 @@ impl Pallet { BTreeMap, ), ) { - // Compute the pending validator alpha. - // This is the total alpha being injected, - // minus the the alpha for the miners, (50%) - // and minus the alpha swapped for TAO (pending_swapped). - let pending_validator_alpha: u64 = pending_alpha - .saturating_add(pending_swapped) - .saturating_div(2) - .saturating_sub(pending_swapped); - let (incentives, dividends) = - Self::calculate_dividends_and_incentives(netuid, pending_alpha, pending_swapped); + Self::calculate_dividends_and_incentives(netuid, hotkey_emission); let stake_map: BTreeMap = Self::get_stake_map(netuid, dividends.keys().collect::>()); - let tao_weight = Self::get_tao_weight(); - let (alpha_dividends, tao_dividends) = Self::calculate_dividend_distribution( pending_validator_alpha, pending_tao, @@ -556,12 +540,29 @@ impl Pallet { owner_cut ); + let tao_weight = Self::get_tao_weight(); + + // Run the epoch. + let hotkey_emission: Vec<(T::AccountId, u64, u64)> = + Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + log::debug!("hotkey_emission: {:?}", hotkey_emission); + + // Compute the pending validator alpha. + // This is the total alpha being injected, + // minus the the alpha for the miners, (50%) + // and minus the alpha swapped for TAO (pending_swapped). 
+ let pending_validator_alpha: u64 = pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped); + let (incentives, (alpha_dividends, tao_dividends)) = Self::calculate_dividend_and_incentive_distribution( netuid, - pending_alpha, pending_tao, - pending_swapped, + pending_validator_alpha, + hotkey_emission, + tao_weight, ); Self::distribute_dividends_and_incentives( From c46718da1702cbcf9c6f5efa429bb4eb3881a120 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Sat, 22 Mar 2025 02:18:29 -0400 Subject: [PATCH 090/121] fix/add tests --- pallets/subtensor/src/tests/coinbase.rs | 187 ++++++++++++++++++++++-- 1 file changed, 175 insertions(+), 12 deletions(-) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index dcb5426ece..8e12996974 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1662,7 +1662,7 @@ fn test_calculate_dividend_distribution_total_only_alpha() { } #[test] -fn test_calculate_dividends_and_incentives_only_alpha() { +fn test_calculate_dividend_and_incentive_distribution() { new_test_ext(1).execute_with(|| { let sn_owner_hk = U256::from(0); let sn_owner_ck = U256::from(1); @@ -1672,25 +1672,77 @@ fn test_calculate_dividends_and_incentives_only_alpha() { let hotkey = U256::from(1); let coldkey = U256::from(2); register_ok_neuron(netuid, hotkey, coldkey, 0); - // Give non-zero alpha - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( - &hotkey, - &coldkey, - netuid, - 1, + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, ); let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha / 2; // Pay half to validators. + let pending_tao: u64 = 0; let pending_swapped = 0; // Only alpha output. 
+ let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% - let (incentives, dividends) = SubtensorModule::calculate_dividends_and_incentives( - netuid, - pending_alpha, - pending_swapped, + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, pending_alpha / 2, pending_alpha / 2)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); + + let incentives_total = incentives.values().sum::(); + let dividends_total = alpha_dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + pending_alpha ); + }); +} + +#[test] +fn test_calculate_dividend_and_incentive_distribution_all_to_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let pending_alpha = 123_456_789; + let pending_validator_alpha = pending_alpha; // Pay all to validators. 
+ let pending_tao: u64 = 0; + let tao_weight: I96F32 = I96F32::saturating_from_num(0.0); // 0% + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, 0, pending_alpha)]; + + let (incentives, (alpha_dividends, tao_dividends)) = + SubtensorModule::calculate_dividend_and_incentive_distribution( + netuid, + pending_tao, + pending_validator_alpha, + hotkey_emission, + tao_weight, + ); let incentives_total = incentives.values().sum::(); - let dividends_total = dividends + let dividends_total = alpha_dividends .values() .sum::() .saturating_to_num::(); @@ -1701,3 +1753,114 @@ fn test_calculate_dividends_and_incentives_only_alpha() { ); }); } + +#[test] +fn test_calculate_dividends_and_incentives() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 683_051_923; + let total_emission: u64 = divdends.saturating_add(incentive); + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!( + dividends_total.saturating_add(incentives_total), + total_emission + ); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_validators() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + 
// Register a single neuron. + let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 123_456_789; + let incentive: u64 = 0; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(dividends_total, divdends); + assert_eq!(incentives_total, 0); + }); +} + +#[test] +fn test_calculate_dividends_and_incentives_only_miners() { + new_test_ext(1).execute_with(|| { + let sn_owner_hk = U256::from(0); + let sn_owner_ck = U256::from(1); + let netuid = add_dynamic_network(&sn_owner_hk, &sn_owner_ck); + + // Register a single neuron. 
+ let hotkey = U256::from(1); + let coldkey = U256::from(2); + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, 1, + ); + + let divdends: u64 = 0; + let incentive: u64 = 123_456_789; + + // Hotkey, Incentive, Dividend + let hotkey_emission = vec![(hotkey, incentive, divdends)]; + + let (incentives, dividends) = + SubtensorModule::calculate_dividends_and_incentives(netuid, hotkey_emission); + + let incentives_total = incentives.values().sum::(); + let dividends_total = dividends + .values() + .sum::() + .saturating_to_num::(); + + assert_eq!(incentives_total, incentive); + assert_eq!(dividends_total, divdends); + }); +} From c35d7eef28d10b77c4a1f62bd4049c2cd16b8ee0 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Mon, 24 Mar 2025 23:23:57 -0400 Subject: [PATCH 091/121] fix no miners alpha drain --- .../subtensor/src/coinbase/run_coinbase.rs | 26 ++++++++++++--- pallets/subtensor/src/tests/coinbase.rs | 33 +++++++++++++++++++ 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index fc950517c8..781be728d6 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -291,6 +291,12 @@ impl Pallet { BTreeMap, BTreeMap, ) { + log::debug!("dividends: {:?}", dividends); + log::debug!("stake_map: {:?}", stake_map); + log::debug!("pending_alpha: {:?}", pending_alpha); + log::debug!("pending_tao: {:?}", pending_tao); + log::debug!("tao_weight: {:?}", tao_weight); + // Setup. let zero: I96F32 = asfloat!(0.0); @@ -551,10 +557,22 @@ impl Pallet { // This is the total alpha being injected, // minus the the alpha for the miners, (50%) // and minus the alpha swapped for TAO (pending_swapped). 
- let pending_validator_alpha: u64 = pending_alpha - .saturating_add(pending_swapped) - .saturating_div(2) - .saturating_sub(pending_swapped); + // Important! If the incentives are 0, then Validators get 100% of the alpha. + let incentive_sum = hotkey_emission + .iter() + .map(|(_, incentive, _)| incentive) + .sum::(); + log::debug!("incentive_sum: {:?}", incentive_sum); + + let pending_validator_alpha: u64 = if incentive_sum != 0 { + pending_alpha + .saturating_add(pending_swapped) + .saturating_div(2) + .saturating_sub(pending_swapped) + } else { + // If the incentive is 0, then Validators get 100% of the alpha. + pending_alpha + }; let (incentives, (alpha_dividends, tao_dividends)) = Self::calculate_dividend_and_incentive_distribution( diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 8e12996974..91748bc170 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1864,3 +1864,36 @@ fn test_calculate_dividends_and_incentives_only_miners() { assert_eq!(dividends_total, divdends); }); } + +#[test] +fn test_drain_pending_emission_no_miners_all_drained() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let init_stake: u64 = 1; + register_ok_neuron(netuid, hotkey, coldkey, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + // Set the emission to be 1 million. + let emission: u64 = 1_000_000; + // Run drain pending without any miners. + SubtensorModule::drain_pending_emission(netuid, emission, 0, 0, 0); + + // Get the new stake of the hotkey. 
+ let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect this neuron to get *all* the emission. + // Slight epsilon due to rounding (hotkey_take). + assert_abs_diff_eq!(new_stake, emission.saturating_add(init_stake), epsilon = 1); + }); +} From 705dd77f7af4122899a31a3c78ea7591e18ffd3d Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Mon, 24 Mar 2025 23:36:03 -0400 Subject: [PATCH 092/121] remove magic number from test --- pallets/subtensor/src/tests/coinbase.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 91748bc170..ee60638349 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1058,11 +1058,11 @@ fn test_get_root_children_drain() { // Alice and Bob both made half of the dividends. assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&alice, alpha), - alice_alpha_stake + pending_alpha / 4 + alice_alpha_stake + pending_alpha / 2 ); assert_eq!( SubtensorModule::get_stake_for_hotkey_on_subnet(&bob, alpha), - bob_alpha_stake + pending_alpha / 4 + bob_alpha_stake + pending_alpha / 2 ); // Lets drain @@ -1092,9 +1092,10 @@ fn test_get_root_children_drain() { assert_eq!(AlphaDividendsPerSubnet::::get(alpha, alice), 0); assert_eq!(TaoDividendsPerSubnet::::get(alpha, alice), 0); // Bob makes it all. 
- assert_eq!( + assert_abs_diff_eq!( AlphaDividendsPerSubnet::::get(alpha, bob), - (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 - 0.495412844)).to_num::() + pending_alpha, + epsilon = 1 ); assert_eq!(TaoDividendsPerSubnet::::get(alpha, bob), pending_root); }); From 99f43dbb0fa67e41c483fb2c144aa13851294ace Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 02:12:06 -0400 Subject: [PATCH 093/121] fix tests --- .../subtensor/src/coinbase/run_coinbase.rs | 1 + pallets/subtensor/src/tests/coinbase.rs | 59 ++++++++----------- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 781be728d6..1b8bc74b6e 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -342,6 +342,7 @@ impl Pallet { log::debug!("alpha_dividends: {:?}", alpha_dividends); log::debug!("root_dividends: {:?}", root_dividends); log::debug!("total_root_divs: {:?}", total_root_divs); + log::debug!("total_alpha_divs: {:?}", total_alpha_divs); // Compute root divs as TAO. Here we take let mut tao_dividends: BTreeMap = BTreeMap::new(); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index ee60638349..48af2df59e 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -569,7 +569,7 @@ fn test_drain_base_with_subnet_with_single_staker_registered_root_weight() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid); let root_after = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, root); - close(stake_before + pending_alpha / 2, stake_after, 10); // Registered gets all alpha emission. + close(stake_before + pending_alpha, stake_after, 10); // Registered gets all alpha emission. 
close(stake_before + pending_tao, root_after, 10); // Registered gets all tao emission }); } @@ -660,8 +660,8 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root() { SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - close(stake_before + pending_alpha / 4, stake_after1, 10); // Registered gets 1/2 emission - close(stake_before + pending_alpha / 4, stake_after2, 10); // Registered gets 1/2 emission. + close(stake_before + pending_alpha / 2, stake_after1, 10); // Registered gets 1/2 emission + close(stake_before + pending_alpha / 2, stake_after2, 10); // Registered gets 1/2 emission. close(stake_before + pending_tao / 2, root_after1, 10); // Registered gets 1/2 tao emission close(stake_before + pending_tao / 2, root_after2, 10); // Registered gets 1/2 tao emission }); @@ -719,21 +719,17 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); let expected_stake = I96F32::from_num(stake_before) - + (I96F32::from_num(pending_alpha) - * I96F32::from_num(3.0 / 5.0) - * I96F32::from_num(1.0 / 3.0)); - close(expected_stake.to_num::(), stake_after1, 10); // Registered gets 60% of emission + + (I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0)); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); // Registered gets 50% of alpha emission let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(2.0 / 5.0) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake2.to_num::(), stake_after2, 10); // Registered gets 40% emission + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // Registered gets 50% emission 
let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); // Registered gets 2/3 tao emission + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // Registered gets 2/3 tao emission let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); // Registered gets 1/3 tao emission + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); // Registered gets 1/3 tao emission }); } @@ -789,26 +785,20 @@ fn test_drain_base_with_subnet_with_two_stakers_registered_and_root_different_am SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, netuid); let root_after2 = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &coldkey, root); - // hotkey 1 has (1 + (2 * 0.5))/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.5714285714 of the hotkey emission. let expected_stake = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.5714285714) - * I96F32::from_num(1.0 / 2.0); - close(expected_stake.to_num::(), stake_after1, 10); - // hotkey 2 has (1 + 1*0.5)/( 1 + 1*0.5 + 1 + (2 * 0.5)) = 0.4285714286 of the hotkey emission. 
+ + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake.to_num::(), stake_after1, epsilon = 10); let expected_stake2 = I96F32::from_num(stake_before) - + I96F32::from_num(pending_alpha) - * I96F32::from_num(0.4285714286) - * I96F32::from_num(2.0 / 3.0); - close(expected_stake2.to_num::(), stake_after2, 10); + + I96F32::from_num(pending_alpha) * I96F32::from_num(1.0 / 2.0); + assert_abs_diff_eq!(expected_stake2.to_num::(), stake_after2, epsilon = 10); // hotkey 1 has 2 / 3 root tao let expected_root1 = I96F32::from_num(2 * stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(2.0 / 3.0); - close(expected_root1.to_num::(), root_after1, 10); + assert_abs_diff_eq!(expected_root1.to_num::(), root_after1, epsilon = 10); // hotkey 1 has 1 / 3 root tao let expected_root2 = I96F32::from_num(stake_before) + I96F32::from_num(pending_tao) * I96F32::from_num(1.0 / 3.0); - close(expected_root2.to_num::(), root_after2, 10); + assert_abs_diff_eq!(expected_root2.to_num::(), root_after2, epsilon = 10); }); } @@ -1173,12 +1163,12 @@ fn test_get_root_children_drain_half_proportion() { // Alice and Bob make the same amount. close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 4, + pending_alpha / 2, 10, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 4, + pending_alpha / 2, 10, ); }); @@ -1244,7 +1234,7 @@ fn test_get_root_children_drain_with_take() { // Set Bob as 100% child of Alice on root. ChildkeyTake::::insert(bob, alpha, u16::MAX); mock_set_children_no_epochs(alpha, &alice, &[(u64::MAX, bob)]); - // Set Bob childkey take to zero. + // Set Bob validator take to zero. Delegates::::insert(alice, 0); Delegates::::insert(bob, 0); @@ -1252,11 +1242,11 @@ fn test_get_root_children_drain_with_take() { let pending_alpha: u64 = 1_000_000_000; SubtensorModule::drain_pending_emission(alpha, pending_alpha, 0, 0, 0); - // Alice and Bob make the same amount. + // Bob makes it all. 
close(AlphaDividendsPerSubnet::::get(alpha, alice), 0, 10); close( AlphaDividendsPerSubnet::::get(alpha, bob), - pending_alpha / 2, + pending_alpha, 10, ); }); @@ -1333,12 +1323,12 @@ fn test_get_root_children_drain_with_half_take() { // Alice and Bob make the same amount. close( AlphaDividendsPerSubnet::::get(alpha, alice), - pending_alpha / 8, + pending_alpha / 4, 10000, ); close( AlphaDividendsPerSubnet::::get(alpha, bob), - 3 * (pending_alpha / 8), + 3 * (pending_alpha / 4), 10000, ); }); @@ -1702,9 +1692,10 @@ fn test_calculate_dividend_and_incentive_distribution() { .sum::() .saturating_to_num::(); - assert_eq!( + assert_abs_diff_eq!( dividends_total.saturating_add(incentives_total), - pending_alpha + pending_alpha, + epsilon = 2 ); }); } From 64642780ec746eb02b876fa8efdf48d1042b5050 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 13:49:49 -0400 Subject: [PATCH 094/121] spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 25799a75c1..94e82ca8f3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -205,7 +205,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 252, + spec_version: 253, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 6a2be68e6c621943cd01ab78ccb193197dd29dca Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Tue, 25 Mar 2025 16:15:13 -0400 Subject: [PATCH 095/121] bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c766000b99..d2a97f0829 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -207,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 254, + spec_version: 255, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From a6a979a40a76a1f79b88a03c298e43ff9f826e4a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 16:19:46 -0400 Subject: [PATCH 096/121] add subnets_to_emit_to --- .../subtensor/src/coinbase/run_coinbase.rs | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index e2a08b3905..2a4621bf5d 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -37,17 +37,23 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); log::debug!("Current block: {:?}", current_block); - // --- 1. Get all netuids (filter out root and new subnet without first emission block) + // --- 1. 
Get all netuids (filter out root) let subnets: Vec = Self::get_all_subnet_netuids() .into_iter() .filter(|netuid| *netuid != 0) - .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) .collect(); log::debug!("All subnet netuids: {:?}", subnets); + // Filter out subnets with no first emission block number. + let subnets_to_emit_to: Vec = subnets + .iter() + .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) + .collect(); + log::debug!("Subnets to emit to: {:?}", subnets_to_emit_to); // --- 2. Get sum of tao reserves ( in a later version we will switch to prices. ) let mut total_moving_prices: I96F32 = I96F32::saturating_from_num(0.0); - for netuid_i in subnets.iter() { + // Only get price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Get and update the moving price of each subnet adding the total together. total_moving_prices = total_moving_prices.saturating_add(Self::get_moving_alpha_price(*netuid_i)); @@ -59,7 +65,8 @@ impl Pallet { let mut tao_in: BTreeMap = BTreeMap::new(); let mut alpha_in: BTreeMap = BTreeMap::new(); let mut alpha_out: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + // Only calculate for subnets that we are emitting to. + for netuid_i in subnets_to_emit_to.iter() { // Get subnet price. let price_i: I96F32 = Self::get_alpha_price(*netuid_i); log::debug!("price_i: {:?}", price_i); @@ -104,7 +111,7 @@ impl Pallet { // --- 4. Injection. // Actually perform the injection of alpha_in, alpha_out and tao_in into the subnet pool. // This operation changes the pool liquidity each block. - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Inject Alpha in. let alpha_in_i: u64 = tou64!(*alpha_in.get(netuid_i).unwrap_or(&asfloat!(0))); SubnetAlphaInEmission::::insert(*netuid_i, alpha_in_i); @@ -136,7 +143,7 @@ impl Pallet { // Owner cuts are accumulated and then fed to the drain at the end of this func. 
let cut_percent: I96F32 = Self::get_float_subnet_owner_cut(); let mut owner_cuts: BTreeMap = BTreeMap::new(); - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Get alpha out. let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); @@ -155,7 +162,7 @@ impl Pallet { // --- 6. Seperate out root dividends in alpha and sell them into tao. // Then accumulate those dividends for later. - for netuid_i in subnets.iter() { + for netuid_i in subnets_to_emit_to.iter() { // Get remaining alpha out. let alpha_out_i: I96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0.0)); log::debug!("alpha_out_i: {:?}", alpha_out_i); @@ -200,12 +207,14 @@ impl Pallet { } // --- 7 Update moving prices after using them in the emission calculation. - for netuid_i in subnets.iter() { + // Only update price EMA for subnets that we emit to. + for netuid_i in subnets_to_emit_to.iter() { // Update moving prices after using them above. Self::update_moving_price(*netuid_i); } // --- 7. Drain pending emission through the subnet based on tempo. + // Run the epoch for *all* subnets, even if we don't emit anything. for &netuid in subnets.iter() { // Pass on subnets that have not reached their tempo. if Self::should_run_epoch(netuid, current_block) { From 33a970c3dd47e1f5cd714ac70829538cef301d6a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 16:39:05 -0400 Subject: [PATCH 097/121] fix clippy --- pallets/subtensor/src/coinbase/run_coinbase.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 2a4621bf5d..1ff8b2760d 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -45,7 +45,8 @@ impl Pallet { log::debug!("All subnet netuids: {:?}", subnets); // Filter out subnets with no first emission block number. 
let subnets_to_emit_to: Vec = subnets - .iter() + .clone() + .into_iter() .filter(|netuid| FirstEmissionBlockNumber::::get(*netuid).is_some()) .collect(); log::debug!("Subnets to emit to: {:?}", subnets_to_emit_to); From e7ea5d9cf9e5f4120d9d048f06564cad16f745c1 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 17:52:27 -0400 Subject: [PATCH 098/121] move test utils --- pallets/subtensor/src/tests/epoch.rs | 22 ---------------------- pallets/subtensor/src/tests/mock.rs | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 1bd998e7a5..aaaf93e086 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -983,28 +983,6 @@ fn test_512_graph_random_weights() { // }); // } -fn next_block_no_epoch(netuid: u16) -> u64 { - // high tempo to skip automatic epochs in on_initialize - let high_tempo: u16 = u16::MAX - 1; - let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - - SubtensorModule::set_tempo(netuid, high_tempo); - let new_block = next_block(); - SubtensorModule::set_tempo(netuid, old_tempo); - - new_block -} - -fn run_to_block_no_epoch(netuid: u16, n: u64) { - // high tempo to skip automatic epochs in on_initialize - let high_tempo: u16 = u16::MAX - 1; - let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - - SubtensorModule::set_tempo(netuid, high_tempo); - run_to_block(n); - SubtensorModule::set_tempo(netuid, old_tempo); -} - // Test bonds exponential moving average over a sequence of epochs. 
#[test] fn test_bonds() { diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index ea5e3d4492..0505841c69 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -594,6 +594,30 @@ pub(crate) fn run_to_block(n: u64) { } } +#[allow(dead_code)] +pub(crate) fn next_block_no_epoch(netuid: u16) -> u64 { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + let new_block = next_block(); + SubtensorModule::set_tempo(netuid, old_tempo); + + new_block +} + +#[allow(dead_code)] +pub(crate) fn run_to_block_no_epoch(netuid: u16, n: u64) { + // high tempo to skip automatic epochs in on_initialize + let high_tempo: u16 = u16::MAX - 1; + let old_tempo: u16 = SubtensorModule::get_tempo(netuid); + + SubtensorModule::set_tempo(netuid, high_tempo); + run_to_block(n); + SubtensorModule::set_tempo(netuid, old_tempo); +} + #[allow(dead_code)] pub(crate) fn step_epochs(count: u16, netuid: u16) { for _ in 0..count { From d100cb5d278b6c4b371b5be34ecaef0223fcef0a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 18:37:29 -0400 Subject: [PATCH 099/121] add tests. 
including for start call --- pallets/subtensor/src/tests/coinbase.rs | 242 ++++++++++++++++++++++++ 1 file changed, 242 insertions(+) diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 48af2df59e..0a95f6dd60 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1889,3 +1889,245 @@ fn test_drain_pending_emission_no_miners_all_drained() { assert_abs_diff_eq!(new_stake, emission.saturating_add(init_stake), epsilon = 1); }); } + +#[test] +fn test_drain_pending_emission_zero_emission() { + new_test_ext(1).execute_with(|| { + let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 50); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + run_to_block_no_epoch(netuid, 50); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Set the emission to be ZERO. 
+ SubtensorModule::drain_pending_emission(netuid, 0, 0, 0, 0); + + // Get the new stake of the hotkey. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set by epoch. + assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. 
+ Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. + let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert_eq!(new_stake, init_stake); + + // Check that the incentive and dividends are set. + assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!(Dividends::::get(netuid).iter().sum::() > 0); + }); +} + +#[test] +fn test_run_coinbase_not_started_start_after() { + new_test_ext(1).execute_with(|| { + let netuid = 1; + let tempo = 2; + + let sn_owner_hk = U256::from(7); + let sn_owner_ck = U256::from(8); + + add_network_without_emission_block(netuid, tempo, 0); + assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); + + SubnetOwner::::insert(netuid, sn_owner_ck); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hk); + + let hotkey = U256::from(3); + let coldkey = U256::from(4); + let miner_hk = U256::from(5); + let miner_ck = U256::from(6); + let init_stake: u64 = 100_000_000_000_000; + let tempo = 2; + SubtensorModule::set_tempo(netuid, tempo); + // Set weight-set limit to 0. 
+ SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, miner_hk, miner_ck, 0); + register_ok_neuron(netuid, sn_owner_hk, sn_owner_ck, 0); + // Give non-zero stake + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, init_stake, + ); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + init_stake + ); + + // Set the weight of root TAO to be 0%, so only alpha is effective. + SubtensorModule::set_tao_weight(0); + + run_to_block_no_epoch(netuid, 30); + + // Run epoch for initial setup. + SubtensorModule::epoch(netuid, 0); + + // Set weights on miner + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![0, 1, 2], + vec![0, 0, 1], + 0, + )); + + // Clear incentive and dividends. + Incentive::::remove(netuid); + Dividends::::remove(netuid); + + // Step so tempo should run. + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + let current_block = System::block_number(); + assert!(SubtensorModule::should_run_epoch(netuid, current_block)); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + let block_number = DurationOfStartCall::get(); + run_to_block_no_epoch(netuid, block_number); + + let current_block = System::block_number(); + + // Run start call. + assert_ok!(SubtensorModule::start_call( + RuntimeOrigin::signed(sn_owner_ck), + netuid + )); + assert_eq!( + FirstEmissionBlockNumber::::get(netuid), + Some(current_block + 1) + ); + + // Run coinbase with emission. + SubtensorModule::run_coinbase(I96F32::saturating_from_num(100_000_000)); + // We expect that the epoch ran. + assert_eq!(BlocksSinceLastStep::::get(netuid), 0); + + // Get the new stake of the hotkey. We expect no emissions. 
+ let new_stake = SubtensorModule::get_total_stake_for_hotkey(&hotkey); + // We expect the stake to remain unchanged. + assert!(new_stake > init_stake); + log::info!("new_stake: {}", new_stake); + }); +} From 75c0d6ec2fcad63ece97ff6c62df55763277f55c Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 21:31:46 -0400 Subject: [PATCH 100/121] set init bonds penalty to 1.0 (u16 max) --- pallets/subtensor/src/tests/mock.rs | 2 +- runtime/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index ea5e3d4492..d6620ab2d7 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -138,7 +138,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty:u16 = 0; + pub const InitialBondsPenalty:u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d2a97f0829..ee920ef930 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1010,7 +1010,7 @@ parameter_types! { pub const SubtensorInitialMaxRegistrationsPerBlock: u16 = 1; pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; - pub const SubtensorInitialBondsPenalty: u16 = 0; + pub const SubtensorInitialBondsPenalty: u16 = u16::MAX; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. 
pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take From d48f22459616640412cd3f5b330afa8f306b2d9a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 25 Mar 2025 21:36:23 -0400 Subject: [PATCH 101/121] oops --- pallets/admin-utils/src/tests/mock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 43a40ec32e..99c11b7165 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -86,7 +86,7 @@ parameter_types! { pub const InitialImmunityPeriod: u16 = 2; pub const InitialMaxAllowedUids: u16 = 2; pub const InitialBondsMovingAverage: u64 = 900_000; - pub const InitialBondsPenalty: u16 = 0; + pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. From a77ea67126003a7593cbba724f12c2cab33fae9e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Mar 2025 08:07:38 -0700 Subject: [PATCH 102/121] add error UnexpectedUnreserveLeftover --- pallets/commitments/src/lib.rs | 6 ++-- pallets/commitments/src/tests.rs | 48 ++++++++++++++++++++++++++++++-- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index e838f81a30..574dbeb735 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -120,6 +120,8 @@ pub mod pallet { CommitmentSetRateLimitExceeded, /// Space Limit Exceeded for the current interval SpaceLimitExceeded, + /// Indicates that unreserve returned a leftover, which is unexpected. 
+ UnexpectedUnreserveLeftover, } #[pallet::type_value] @@ -264,9 +266,7 @@ pub mod pallet { T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; } if old_deposit > id.deposit { - let err_amount = - T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); - debug_assert!(err_amount.is_zero()); + return Err(Error::::UnexpectedUnreserveLeftover.into()); } >::insert(netuid, &who, id); diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index e6bf38c445..592feb0921 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -4,14 +4,17 @@ use sp_std::prelude::*; #[cfg(test)] use crate::{ CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, RateLimit, - RevealedCommitments, TimelockedIndex, + Registration, RevealedCommitments, TimelockedIndex, mock::{ - DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, + Balances, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, insert_drand_pulse, new_test_ext, produce_ciphertext, }, }; use frame_support::pallet_prelude::Hooks; -use frame_support::{BoundedVec, assert_noop, assert_ok, traits::Get}; +use frame_support::{ + BoundedVec, assert_noop, assert_ok, + traits::{Currency, Get, ReservableCurrency}, +}; use frame_system::Pallet as System; #[allow(clippy::indexing_slicing)] @@ -1220,3 +1223,42 @@ fn on_initialize_reveals_matured_timelocks() { } }); } + +#[test] +fn set_commitment_unreserve_leftover_fails() { + new_test_ext().execute_with(|| { + use frame_system::RawOrigin; + + let netuid = 999; + let who = 99; + + Balances::make_free_balance_be(&who, 10_000); + + let fake_deposit = 100; + let dummy_info = CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("empty fields is fine"), + }; + let registration = Registration { + deposit: fake_deposit, + info: dummy_info, + block: 0u64.into(), + }; + + CommitmentOf::::insert(netuid, &who, registration); + + 
assert_ok!(Balances::reserve(&who, fake_deposit)); + assert_eq!(Balances::reserved_balance(who), 100); + + Balances::unreserve(&who, 10_000); + assert_eq!(Balances::reserved_balance(who), 0); + + let commit_small = Box::new(CommitmentInfo { + fields: BoundedVec::try_from(vec![]).expect("no fields is fine"), + }); + + assert_noop!( + Pallet::::set_commitment(RawOrigin::Signed(who).into(), netuid, commit_small), + Error::::UnexpectedUnreserveLeftover + ); + }); +} From ba02bea8f9f6caf0d2b927beb11b2ba4700d068a Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Mar 2025 10:12:50 -0700 Subject: [PATCH 103/121] better logic --- pallets/commitments/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 574dbeb735..a62084de5c 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -266,7 +266,11 @@ pub mod pallet { T::Currency::reserve(&who, id.deposit.saturating_sub(old_deposit))?; } if old_deposit > id.deposit { - return Err(Error::::UnexpectedUnreserveLeftover.into()); + let err_amount = + T::Currency::unreserve(&who, old_deposit.saturating_sub(id.deposit)); + if !err_amount.is_zero() { + return Err(Error::::UnexpectedUnreserveLeftover.into()); + } } >::insert(netuid, &who, id); From 9179e14ac690a00dc7e659a34397792c3f2a7bf5 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 14:08:17 -0400 Subject: [PATCH 104/121] fix min burn set too low --- pallets/subtensor/src/subnets/subnet.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 310c1d5fde..b47684feaf 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -272,7 +272,6 @@ impl Pallet { Self::set_target_registrations_per_interval(netuid, 1); Self::set_adjustment_alpha(netuid, 
17_893_341_751_498_265_066); // 18_446_744_073_709_551_615 * 0.97 = 17_893_341_751_498_265_066 Self::set_immunity_period(netuid, 5000); - Self::set_min_burn(netuid, 1); Self::set_min_difficulty(netuid, u64::MAX); Self::set_max_difficulty(netuid, u64::MAX); From 8d3044c65d157f8d33b4d563dd4080ba3d0bd9b1 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 14:15:37 -0400 Subject: [PATCH 105/121] add to mock and add a test --- pallets/subtensor/src/tests/mock.rs | 2 +- pallets/subtensor/src/tests/subnet.rs | 37 +++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index cf0f14ea7c..9729d55d1a 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -152,7 +152,7 @@ parameter_types! { pub const InitialTxDelegateTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialTxChildKeyTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialBurn: u64 = 0; - pub const InitialMinBurn: u64 = 0; + pub const InitialMinBurn: u64 = 500_000; pub const InitialMaxBurn: u64 = 1_000_000_000; pub const InitialValidatorPruneLen: u64 = 0; pub const InitialScalingLawPower: u16 = 50; diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index 1daf19159f..de9b112f1e 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -222,3 +222,40 @@ fn test_do_start_call_ok_with_same_block_number_after_coinbase() { } }); } + +#[test] +fn test_register_network_min_burn_at_default() { + new_test_ext(0).execute_with(|| { + let sn_owner_coldkey = U256::from(0); + let sn_owner_hotkey = U256::from(1); + let cost = SubtensorModule::get_network_lock_cost(); + + // Give coldkey enough for lock + SubtensorModule::add_balance_to_coldkey_account(&sn_owner_coldkey, cost + 10_000_000_000); + + // Register network + 
assert_ok!(SubtensorModule::register_network( + <::RuntimeOrigin>::signed(sn_owner_coldkey), + sn_owner_hotkey + )); + // Get last events + let events = System::events(); + let min_burn_event = events + .iter() + .filter(|event| { + matches!( + event.event, + RuntimeEvent::SubtensorModule(SubtensorEvent::NetworkAdded(_, _)) + ) + }) + .last() + .unwrap(); + let netuid = match min_burn_event.event { + RuntimeEvent::SubtensorModule(SubtensorEvent::NetworkAdded(netuid, _)) => netuid, + _ => panic!("Expected NetworkAdded event"), + }; + + // Check min burn is set to default + assert_eq!(MinBurn::::get(netuid), InitialMinBurn::get()); + }); +} From 96f7501c8dd3d34e34428557d6e13bf6b765a6ec Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 14:15:49 -0400 Subject: [PATCH 106/121] spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ee920ef930..1aec928978 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -207,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 255, + spec_version: 256, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From ada3a063ab91e8927e5024ca410f96f86ef3fd4a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 14:17:59 -0400 Subject: [PATCH 107/121] fix mechid in event --- pallets/subtensor/src/subnets/subnet.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index b47684feaf..e4721c03f5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -239,7 +239,7 @@ impl Pallet { netuid_to_register, mechid ); - Self::deposit_event(Event::NetworkAdded(netuid_to_register, 0)); + Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); // --- 17. Return success. Ok(()) From 33779ef9f8e8243ef7460ce6562287432feaaf65 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 14:24:22 -0400 Subject: [PATCH 108/121] fix event filtering --- pallets/subtensor/src/tests/subnet.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index de9b112f1e..cb331e8f17 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -225,7 +225,7 @@ fn test_do_start_call_ok_with_same_block_number_after_coinbase() { #[test] fn test_register_network_min_burn_at_default() { - new_test_ext(0).execute_with(|| { + new_test_ext(1).execute_with(|| { let sn_owner_coldkey = U256::from(0); let sn_owner_hotkey = U256::from(1); let cost = SubtensorModule::get_network_lock_cost(); @@ -245,13 +245,13 @@ fn test_register_network_min_burn_at_default() { .filter(|event| { matches!( event.event, - RuntimeEvent::SubtensorModule(SubtensorEvent::NetworkAdded(_, _)) + RuntimeEvent::SubtensorModule(Event::::NetworkAdded(..)) ) }) .last() .unwrap(); let netuid = match min_burn_event.event { - 
RuntimeEvent::SubtensorModule(SubtensorEvent::NetworkAdded(netuid, _)) => netuid, + RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _)) => netuid, _ => panic!("Expected NetworkAdded event"), }; From 32954b8378614c4489a836ac49eacfd75e685bc2 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Mar 2025 11:44:49 -0700 Subject: [PATCH 109/121] clippy --- pallets/commitments/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 592feb0921..f03e99080e 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -1244,7 +1244,7 @@ fn set_commitment_unreserve_leftover_fails() { block: 0u64.into(), }; - CommitmentOf::::insert(netuid, &who, registration); + CommitmentOf::::insert(netuid, who, registration); assert_ok!(Balances::reserve(&who, fake_deposit)); assert_eq!(Balances::reserved_balance(who), 100); From 58b0640d208a6f2087b54272f7e5a7d4d743f44a Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 26 Mar 2025 11:55:11 -0700 Subject: [PATCH 110/121] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ee920ef930..1aec928978 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -207,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 255, + spec_version: 256, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 67920ac48d7b755e6602285c490efc00e3b5d4dc Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 15:46:05 -0400 Subject: [PATCH 111/121] remove unwrap from test --- pallets/subtensor/src/tests/subnet.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index cb331e8f17..4ceeaab897 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -248,10 +248,10 @@ fn test_register_network_min_burn_at_default() { RuntimeEvent::SubtensorModule(Event::::NetworkAdded(..)) ) }) - .last() - .unwrap(); - let netuid = match min_burn_event.event { - RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _)) => netuid, + .last(); + + let netuid = match min_burn_event.map(|event| event.event.clone()) { + Some(RuntimeEvent::SubtensorModule(Event::::NetworkAdded(netuid, _))) => netuid, _ => panic!("Expected NetworkAdded event"), }; From c3b3f929cd25d0c7fe29d59874c4cd1dc65b13bf Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 15:51:26 -0400 Subject: [PATCH 112/121] fix burn adjust test --- pallets/subtensor/src/tests/registration.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 50d409561d..1ae16d95c0 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -1,5 +1,6 @@ #![allow(clippy::unwrap_used)] +use approx::assert_abs_diff_eq; use frame_support::traits::Currency; use super::mock::*; @@ -535,11 +536,11 @@ fn test_burn_adjustment() { new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let tempo: u16 = 13; - let burn_cost: u64 = 1000; + let init_burn_cost: u64 = InitialMinBurn::get() + 10_000; let 
adjustment_interval = 1; let target_registrations_per_interval = 1; add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); + SubtensorModule::set_burn(netuid, init_burn_cost); SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. SubtensorModule::set_target_registrations_per_interval( @@ -550,7 +551,7 @@ fn test_burn_adjustment() { // Register key 1. let hotkey_account_id_1 = U256::from(1); let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_1), netuid, @@ -560,7 +561,7 @@ fn test_burn_adjustment() { // Register key 2. let hotkey_account_id_2 = U256::from(2); let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, init_burn_cost); assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(hotkey_account_id_2), netuid, @@ -571,8 +572,13 @@ fn test_burn_adjustment() { // Step the block and trigger the adjustment. step_block(1); - // Check the adjusted burn. - assert_eq!(SubtensorModule::get_burn_as_u64(netuid), 1500); + // Check the adjusted burn is above the initial min burn. 
+ assert!(SubtensorModule::get_burn_as_u64(netuid) > init_burn_cost); + assert_abs_diff_eq!( + SubtensorModule::get_burn_as_u64(netuid), + init_burn_cost.saturating_mul(3).saturating_div(2), // 1.5x + epsilon = 1000 + ); }); } From f64b0a12b0d8f9afd88e635cfd571cc78e28daf6 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:11:37 -0400 Subject: [PATCH 113/121] fix recycle alpha, allow nom usage and deduct from shares --- .../subtensor/src/staking/recycle_alpha.rs | 31 ++-- pallets/subtensor/src/tests/recycle_alpha.rs | 148 ++++++++++++++++++ 2 files changed, 166 insertions(+), 13 deletions(-) diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index cb5e740e84..f1e4003f79 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -20,20 +20,22 @@ impl Pallet { amount: u64, netuid: u16, ) -> DispatchResult { - let coldkey = ensure_signed(origin)?; + let coldkey: T::AccountId = ensure_signed(origin)?; ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); + // Ensure that the hotkey account exists this is only possible through registration. ensure!( - Self::coldkey_owns_hotkey(&coldkey, &hotkey), - Error::::NonAssociatedColdKey + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists ); + // Ensure that the hotkey has enough stake to withdraw. ensure!( - TotalHotkeyAlpha::::get(&hotkey, netuid) >= amount, + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), Error::::NotEnoughStakeToWithdraw ); @@ -42,19 +44,22 @@ impl Pallet { Error::::InsufficientLiquidity ); - if TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| { - *v = v.saturating_sub(amount); - *v - }) == 0 - { - TotalHotkeyAlpha::::remove(&hotkey, netuid); - } + // Deduct from the coldkey's stake. 
+ let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + // Recycle means we should decrease the alpha issuance tracker. SubnetAlphaOut::::mutate(netuid, |total| { - *total = total.saturating_sub(amount); + *total = total.saturating_sub(actual_alpha_decrease); }); - Self::deposit_event(Event::AlphaRecycled(coldkey, hotkey, amount, netuid)); + Self::deposit_event(Event::AlphaRecycled( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); Ok(()) } diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index 894a8887a4..bb81190e80 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -1,3 +1,4 @@ +use approx::assert_abs_diff_eq; use frame_support::{assert_noop, assert_ok, traits::Currency}; use sp_core::U256; @@ -44,6 +45,153 @@ fn test_recycle_success() { assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < initial_alpha + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // 
add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaRecycled(..)) + ) + })); + }); +} + +#[test] +fn test_recycle_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + 
increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. + increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + // Verify the ownership + assert_ne!( + SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey), + other_coldkey + ); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let recycle_amount = stake / 2; + + // recycle from nominator coldkey + assert_ok!(SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + recycle_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) < initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); assert!(System::events().iter().any(|e| { matches!( From 9b5d1621006e24a846d3323e9320feab540db9e1 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:17:29 -0400 Subject: [PATCH 114/121] add failing tests --- pallets/subtensor/src/tests/recycle_alpha.rs | 142 +++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index bb81190e80..65bc4d2e15 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -242,6 +242,148 @@ fn test_burn_success() { assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); assert!(SubnetAlphaOut::::get(netuid) == 
initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_staker_is_nominator() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. + // Note: this coldkey DOES NOT own the hotkey, so it is a nominator. 
+ increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from nominator coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(other_coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ) < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid), + stake, + epsilon = 2 + ); + + assert!(System::events().iter().any(|e| { + matches!( + &e.event, + RuntimeEvent::SubtensorModule(Event::AlphaBurned(..)) + ) + })); + }); +} + +#[test] +fn test_burn_two_stakers() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let other_coldkey = U256::from(3); + + let owner_coldkey = U256::from(1001); + let owner_hotkey = U256::from(1002); + let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); + + let initial_balance = 1_000_000_000; + Balances::make_free_balance_be(&coldkey, initial_balance); + + // associate coldkey and hotkey + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake, netuid); + + // add some stake to other coldkey on same hotkey. 
+ increase_stake_on_coldkey_hotkey_account(&other_coldkey, &hotkey, stake, netuid); + + // get initial total issuance and alpha out + let initial_alpha = TotalHotkeyAlpha::::get(hotkey, netuid); + let initial_net_alpha = SubnetAlphaOut::::get(netuid); + + // amount to recycle + let burn_amount = stake / 2; + + // burn from coldkey + assert_ok!(SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + )); + + assert!(TotalHotkeyAlpha::::get(hotkey, netuid) < initial_alpha); + assert!(SubnetAlphaOut::::get(netuid) == initial_net_alpha); + assert!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &coldkey, netuid) + < stake + ); + // Make sure the other coldkey has no change + assert_abs_diff_eq!( + SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &other_coldkey, + netuid + ), + stake, + epsilon = 2 + ); assert!(System::events().iter().any(|e| { matches!( From 7521a527d4d05204aec6dde4415d3f567d84da11 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:24:57 -0400 Subject: [PATCH 115/121] fix impl --- .../subtensor/src/staking/recycle_alpha.rs | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index f1e4003f79..2b1d469ebb 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -89,13 +89,15 @@ impl Pallet { Error::::SubNetworkDoesNotExist ); + // Ensure that the hotkey account exists this is only possible through registration. ensure!( - Self::coldkey_owns_hotkey(&coldkey, &hotkey), - Error::::NonAssociatedColdKey + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists ); + // Ensure that the hotkey has enough stake to withdraw. 
ensure!( - TotalHotkeyAlpha::::get(&hotkey, netuid) >= amount, + Self::has_enough_stake_on_subnet(&hotkey, &coldkey, netuid, amount), Error::::NotEnoughStakeToWithdraw ); @@ -104,16 +106,20 @@ impl Pallet { Error::::InsufficientLiquidity ); - if TotalHotkeyAlpha::::mutate(&hotkey, netuid, |v| { - *v = v.saturating_sub(amount); - *v - }) == 0 - { - TotalHotkeyAlpha::::remove(&hotkey, netuid); - } + // Deduct from the coldkey's stake. + let actual_alpha_decrease = Self::decrease_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, &coldkey, netuid, amount, + ); + + // This is a burn, so we don't need to update AlphaOut. // Deposit event - Self::deposit_event(Event::AlphaBurned(coldkey, hotkey, amount, netuid)); + Self::deposit_event(Event::AlphaBurned( + coldkey, + hotkey, + actual_alpha_decrease, + netuid, + )); Ok(()) } From 30b5510e7174969ddfa63ad1870439145a169567 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:25:02 -0400 Subject: [PATCH 116/121] update tests --- pallets/subtensor/src/tests/recycle_alpha.rs | 30 +++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index 65bc4d2e15..cd7c81ce45 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -399,7 +399,7 @@ fn test_recycle_errors() { new_test_ext(1).execute_with(|| { let coldkey = U256::from(1); let hotkey = U256::from(2); - let wrong_coldkey = U256::from(3); + let wrong_hotkey = U256::from(3); let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); @@ -426,12 +426,12 @@ fn test_recycle_errors() { assert_noop!( SubtensorModule::recycle_alpha( - RuntimeOrigin::signed(wrong_coldkey), - hotkey, + RuntimeOrigin::signed(coldkey), + wrong_hotkey, 100_000, netuid ), - Error::::NonAssociatedColdKey + Error::::HotKeyAccountNotExists ); assert_noop!( @@ -444,8 +444,12 @@ fn 
test_recycle_errors() { Error::::NotEnoughStakeToWithdraw ); - // make it pass the hotkey alpha check - TotalHotkeyAlpha::::set(hotkey, netuid, SubnetAlphaOut::::get(netuid) + 1); + // make it pass the stake check + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); assert_noop!( SubtensorModule::recycle_alpha( @@ -464,7 +468,7 @@ fn test_burn_errors() { new_test_ext(1).execute_with(|| { let coldkey = U256::from(1); let hotkey = U256::from(2); - let wrong_coldkey = U256::from(3); + let wrong_hotkey = U256::from(3); let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); @@ -491,12 +495,12 @@ fn test_burn_errors() { assert_noop!( SubtensorModule::burn_alpha( - RuntimeOrigin::signed(wrong_coldkey), - hotkey, + RuntimeOrigin::signed(coldkey), + wrong_hotkey, 100_000, netuid ), - Error::::NonAssociatedColdKey + Error::::HotKeyAccountNotExists ); assert_noop!( @@ -510,7 +514,11 @@ fn test_burn_errors() { ); // make it pass the hotkey alpha check - TotalHotkeyAlpha::::set(hotkey, netuid, SubnetAlphaOut::::get(netuid) + 1); + TotalHotkeyAlpha::::set( + hotkey, + netuid, + SubnetAlphaOut::::get(netuid).saturating_mul(2), + ); assert_noop!( SubtensorModule::burn_alpha( From 7c549acb8c864b1de886e27b95913be7985cd038 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:42:48 -0400 Subject: [PATCH 117/121] add test for root netuid no burn/recyc --- pallets/subtensor/src/macros/errors.rs | 2 ++ pallets/subtensor/src/tests/recycle_alpha.rs | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index ef965bf169..3404b36d8d 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -201,5 +201,7 @@ mod errors { NeedWaitingMoreBlocksToStarCall, /// Not enough AlphaOut on the subnet to recycle NotEnoughAlphaOutToRecycle, + /// Cannot burn or 
recycle TAO from root subnet + CannotBurnOrRecycleOnRootSubnet, } } diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index cd7c81ce45..6224a808c0 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -424,6 +424,16 @@ fn test_recycle_errors() { Error::::SubNetworkDoesNotExist ); + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + assert_noop!( SubtensorModule::recycle_alpha( RuntimeOrigin::signed(coldkey), @@ -493,6 +503,16 @@ fn test_burn_errors() { Error::::SubNetworkDoesNotExist ); + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + 100_000, + SubtensorModule::get_root_netuid(), + ), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + assert_noop!( SubtensorModule::burn_alpha( RuntimeOrigin::signed(coldkey), From 094b8754aff3febafd489d82c674b4c45b6f72b6 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:43:18 -0400 Subject: [PATCH 118/121] dont burn TAO --- pallets/subtensor/src/staking/recycle_alpha.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 2b1d469ebb..b5e6762e6a 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -27,6 +27,11 @@ impl Pallet { Error::::SubNetworkDoesNotExist ); + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + // Ensure that the hotkey account exists this is only possible through registration. 
ensure!( Self::hotkey_account_exists(&hotkey), @@ -89,6 +94,11 @@ impl Pallet { Error::::SubNetworkDoesNotExist ); + ensure!( + netuid != Self::get_root_netuid(), + Error::::CannotBurnOrRecycleOnRootSubnet + ); + // Ensure that the hotkey account exists this is only possible through registration. ensure!( Self::hotkey_account_exists(&hotkey), From 5bcf61010ea22d23d41fb68639669f834e26d2a1 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:44:38 -0400 Subject: [PATCH 119/121] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ee920ef930..1aec928978 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -207,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 255, + spec_version: 256, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 0042e50c9fdcd7c1ea64cd2880465e9e2f6693b4 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 26 Mar 2025 21:47:31 -0400 Subject: [PATCH 120/121] test with root subnet creation --- pallets/subtensor/src/tests/recycle_alpha.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index 6224a808c0..b142e5d3c9 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -405,6 +405,9 @@ fn test_recycle_errors() { let subnet_owner_hotkey = U256::from(1002); let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + let initial_balance = 1_000_000_000; Balances::make_free_balance_be(&coldkey, initial_balance); @@ -484,6 +487,9 @@ fn test_burn_errors() { let subnet_owner_hotkey = U256::from(1002); let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + // Create root subnet + migrations::migrate_create_root_network::migrate_create_root_network::(); + let initial_balance = 1_000_000_000; Balances::make_free_balance_be(&coldkey, initial_balance); From 34c60ef91b8746fa963617e25959fab71b58996e Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Wed, 26 Mar 2025 22:25:30 -0400 Subject: [PATCH 121/121] bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 1aec928978..e9ead1812f 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -207,7 +207,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 256, + spec_version: 257, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1,