Skip to content

Chain bloat #541

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 68 commits into from
Closed
Show file tree
Hide file tree
Changes from 11 commits
Commits
Show all changes
68 commits
Select commit Hold shift + click to select a range
0281700
initial commit
Jun 15, 2024
c7a114b
initial commit
Jun 15, 2024
5ccb2bc
add comments
Jun 16, 2024
46b5522
add comments
Jun 16, 2024
2ca9071
add comments
Jun 16, 2024
7ff9b11
clean coinbase
Jun 16, 2024
d5680c3
clean chain bloat further
Jun 16, 2024
5d240fa
clean chain bloat further
Jun 16, 2024
8aca0bb
clean chain bloat further
Jun 16, 2024
aad5478
clean build no warnings
Jun 16, 2024
9bc4dca
fix unsafe math
open-junius Jun 17, 2024
45ea7f6
use safe math
open-junius Jun 17, 2024
d47590a
fix more unsafe math
open-junius Jun 17, 2024
231cc88
fix all unsafe math
open-junius Jun 17, 2024
70ec3c6
add todo
Jun 17, 2024
5b8141d
add todo
Jun 17, 2024
1062fb6
testable now
open-junius Jun 17, 2024
7fa36e4
tests
Jun 17, 2024
9d4de66
merge remote
Jun 17, 2024
15cc368
add child tests
Jun 17, 2024
ea9aa97
add children tests
Jun 18, 2024
3e97db1
fix compile errors
Jun 18, 2024
f1f1dc0
sd: children tests plus getters
Jun 18, 2024
2b040c8
chore: remove comment
Jun 18, 2024
083b5cd
add comments
Jun 18, 2024
7fb0cf6
Merge branch 'chain_bloat' of https://github.com/opentensor/subtensor…
Jun 19, 2024
ca41be6
add coinbase test
Jun 19, 2024
f6e2b8a
add coinbase test
Jun 19, 2024
784a28a
fix tests add new ones for chain bloat
Jun 19, 2024
d673a57
add owner cut
Jun 19, 2024
4802671
add safe math
Jun 19, 2024
25fae32
feat: revoke childkey , childkey extrinsics , tests , initial benchmar…
Jun 19, 2024
46eea19
feat: lints , tests , getter , setters
Jun 20, 2024
deb4908
feat: benchmarking set_hotkey_emission_tempo, clippy
Jun 22, 2024
26908c3
feat: set children multiple + tests
Jun 23, 2024
b33f55a
feat: revoke multiple children + tests
Jun 23, 2024
03b1131
feat: started test_comprehensive_coinbase
Jun 23, 2024
576b20f
feat: check for self loops via dfs in children
Jun 24, 2024
7d26751
feat: full coinbase test
Jun 25, 2024
0d371bc
feat: caps , cap tests
Jun 26, 2024
b480518
feat: child_info rpc
Jun 27, 2024
2c5f6e6
chore: fix rpcs , tests
Jun 27, 2024
2368e61
chore: lint
Jun 27, 2024
6dceea5
feat: add Parents to child info
Jun 27, 2024
d85d11b
fix: sat maths
Jun 28, 2024
09ebd25
feat: enforce proportions to 1
Jul 1, 2024
71ca55e
add do_set_delegate_takes
open-junius Jul 1, 2024
8a56833
fix conflict
open-junius Jul 1, 2024
fb8231a
fix fmt
open-junius Jul 1, 2024
8535dcf
optimize the impl
open-junius Jul 1, 2024
e0f2c2b
fix clippy
open-junius Jul 1, 2024
17889ec
fix unit test
open-junius Jul 1, 2024
2de784a
Merge branch 'main' into chain_bloat
distributedstatemachine Jul 1, 2024
42763ae
fix compilation error
open-junius Jul 2, 2024
d5c5a77
fix test_get_children_info
open-junius Jul 2, 2024
ae628b8
add duplicate child check
open-junius Jul 2, 2024
bd0acfd
merge with main
open-junius Jul 5, 2024
feaa4b2
merge with main
open-junius Jul 5, 2024
122c021
fix clippy
open-junius Jul 5, 2024
85fafe6
fix clippy
open-junius Jul 5, 2024
b761d37
fix clippy
open-junius Jul 5, 2024
7641f15
fix unit test
open-junius Jul 5, 2024
feb1a42
fix justfile
open-junius Jul 5, 2024
7d68716
rebase code
open-junius Jul 10, 2024
8aaac52
trigger ci
open-junius Jul 11, 2024
2a94385
merge with main
open-junius Jul 16, 2024
aeeb039
remove commented code
open-junius Jul 16, 2024
3430b08
fix unit test
open-junius Jul 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
318 changes: 2 additions & 316 deletions pallets/subtensor/src/block_step.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
use super::*;
use frame_support::storage::IterableStorageDoubleMap;
use frame_support::storage::IterableStorageMap;
use substrate_fixed::types::I110F18;
use substrate_fixed::types::I64F64;
use substrate_fixed::types::I96F32;

impl<T: Config> Pallet<T> {
/// Executes the necessary operations for each block.
Expand All @@ -12,323 +9,12 @@ impl<T: Config> Pallet<T> {
log::debug!("block_step for block: {:?} ", block_number);
// --- 1. Adjust difficulties.
Self::adjust_registration_terms_for_networks();
// --- 2. Calculate per-subnet emissions
match Self::root_epoch(block_number) {
Ok(_) => (),
Err(e) => {
log::trace!("Error while running root epoch: {:?}", e);
}
}
// --- 3. Drains emission tuples ( hotkey, amount ).
Self::drain_emission(block_number);
// --- 4. Generates emission tuples from epoch functions.
Self::generate_emission(block_number);
// --- 2. Run emission through network.
Self::run_coinbase();
// Return ok.
Ok(())
}

/// Helper function which returns the number of blocks remaining before we will run the epoch on this
/// network. A network runs its epoch on the block where
/// `(block_number + netuid + 1) % (tempo + 1) == tempo`, at which point this returns 0.
///
pub fn blocks_until_next_epoch(netuid: u16, tempo: u16, block_number: u64) -> u64 {
    // tempo | netuid | # first epoch block
    //   1        0               0
    //   1        1               1
    //   2        0               1
    //   2        1               0
    //  100       0              99
    //  100       1              98
    // Special case: tempo = 0, the network never runs.
    if tempo == 0 {
        return 1000;
    }
    let tempo_u64 = u64::from(tempo);
    // Position of this (network, block) pair within the current tempo cycle.
    let position = (block_number + u64::from(netuid) + 1) % (tempo_u64 + 1);
    tempo_u64 - position
}

/// Returns how many emission tuples should be drained on this block, given how many
/// tuples remain (`n_remaining`) and how close the network is to its next epoch.
///
/// Everything is drained at once when the epoch is imminent (fewer than two blocks
/// away) or when the tempo is too short to spread the work out; otherwise the
/// backlog is paced so that it fully sinks within half an epoch.
///
pub fn tuples_to_drain_this_block(
    netuid: u16,
    tempo: u16,
    block_number: u64,
    n_remaining: usize,
) -> usize {
    let blocks_until_epoch: u64 = Self::blocks_until_next_epoch(netuid, tempo, block_number);
    // Epoch is imminent (0 or 1 blocks away), or the tempo is too short to pace the
    // drain: drain everything now. (For unsigned x, `x / 2 == 0` means `x < 2`.)
    if blocks_until_epoch < 2 || tempo < 2 {
        return n_remaining;
    }
    // Nothing to drain at all.
    if n_remaining == 0 {
        return 0;
    }
    // Otherwise drain at whichever pacing clears the backlog faster:
    // spread over half the tempo, or over half the blocks left until the epoch.
    // Both divisors are >= 1 here because of the `< 2` guards above.
    let per_tempo: usize = n_remaining / (tempo as usize / 2);
    let per_proximity: usize = n_remaining / (blocks_until_epoch as usize / 2);
    per_tempo.max(per_proximity)
}

/// Returns the queued emission tuples for `netuid` from `LoadedEmission` storage,
/// or `None` when no tuples are loaded for that network. Each tuple is
/// (hotkey, server_emission, validator_emission), as consumed by `drain_emission`.
pub fn get_loaded_emission_tuples(netuid: u16) -> Option<Vec<(T::AccountId, u64, u64)>> {
LoadedEmission::<T>::get(netuid)
}

/// Reads from the loaded emission storage which contains lists of pending emission tuples
/// ( hotkey, server_emission, validator_emission ) and distributes them onto accounts,
/// crediting the total to `TotalIssuance`.
///
/// The `u64` block-number parameter is unused but kept for interface compatibility.
///
pub fn drain_emission(_: u64) {
    // --- 1. We iterate across each network.
    for (netuid, _) in <Tempo<T> as IterableStorageMap<u16, u16>>::iter() {
        let Some(tuples_to_drain) = Self::get_loaded_emission_tuples(netuid) else {
            // There are no tuples to emit.
            continue;
        };
        let mut total_emitted: u64 = 0;
        for (hotkey, server_amount, validator_amount) in tuples_to_drain.iter() {
            Self::emit_inflation_through_hotkey_account(
                hotkey,
                *server_amount,
                *validator_amount,
            );
            // Saturating math, consistent with the rest of the pallet: a malformed
            // tuple must not be able to overflow/wrap the running total.
            total_emitted =
                total_emitted.saturating_add(server_amount.saturating_add(*validator_amount));
        }
        // Remove the drained tuples and account for the newly issued tokens.
        LoadedEmission::<T>::remove(netuid);
        TotalIssuance::<T>::put(TotalIssuance::<T>::get().saturating_add(total_emitted));
    }
}

/// Iterates through networks queues more emission onto their pending storage.
/// If a network has no blocks left until tempo, we run the epoch function and generate
/// more token emission tuples for later draining onto accounts.
///
pub fn generate_emission(block_number: u64) {
// --- 1. Iterate across each network and add pending emission into stash.
for (netuid, tempo) in <Tempo<T> as IterableStorageMap<u16, u16>>::iter() {
// Skip the root network or subnets with registrations turned off
if netuid == Self::get_root_netuid() || !Self::is_registration_allowed(netuid) {
// Root emission or subnet emission is burned
continue;
}

// --- 2. Queue the emission due to this network.
let new_queued_emission: u64 = Self::get_subnet_emission_value(netuid);
log::debug!(
"generate_emission for netuid: {:?} with tempo: {:?} and emission: {:?}",
netuid,
tempo,
new_queued_emission,
);

let subnet_has_owner = SubnetOwner::<T>::contains_key(netuid);
let mut remaining = I96F32::from_num(new_queued_emission);
if subnet_has_owner {
let cut = remaining
.saturating_mul(I96F32::from_num(Self::get_subnet_owner_cut()))
.saturating_div(I96F32::from_num(u16::MAX));

remaining = remaining.saturating_sub(cut);

Self::add_balance_to_coldkey_account(
&Self::get_subnet_owner(netuid),
cut.to_num::<u64>(),
);

// We are creating tokens here from the coinbase.
Self::coinbase(cut.to_num::<u64>());
}
// --- 5. Add remaining amount to the network's pending emission.
PendingEmission::<T>::mutate(netuid, |queued| *queued += remaining.to_num::<u64>());
log::debug!(
"netuid_i: {:?} queued_emission: +{:?} ",
netuid,
new_queued_emission
);

// --- 6. Check to see if this network has reached tempo.
if Self::blocks_until_next_epoch(netuid, tempo, block_number) != 0 {
// --- 3.1 No epoch, increase blocks since last step and continue,
Self::set_blocks_since_last_step(
netuid,
Self::get_blocks_since_last_step(netuid) + 1,
);
continue;
}

// --- 7 This network is at tempo and we are running its epoch.
// First drain the queued emission.
let emission_to_drain: u64 = PendingEmission::<T>::get(netuid);
PendingEmission::<T>::insert(netuid, 0);

// --- 8. Run the epoch mechanism and return emission tuples for hotkeys in the network.
let emission_tuples_this_block: Vec<(T::AccountId, u64, u64)> =
Self::epoch(netuid, emission_to_drain);
log::debug!(
"netuid_i: {:?} emission_to_drain: {:?} ",
netuid,
emission_to_drain
);

// --- 9. Check that the emission does not exceed the allowed total.
let emission_sum: u128 = emission_tuples_this_block
.iter()
.map(|(_account_id, ve, se)| *ve as u128 + *se as u128)
.sum();
if emission_sum > emission_to_drain as u128 {
continue;
} // Saftey check.

// --- 10. Sink the emission tuples onto the already loaded.
let mut concat_emission_tuples: Vec<(T::AccountId, u64, u64)> =
emission_tuples_this_block.clone();
if let Some(mut current_emission_tuples) = Self::get_loaded_emission_tuples(netuid) {
// 10.a We already have loaded emission tuples, so we concat the new ones.
concat_emission_tuples.append(&mut current_emission_tuples);
}
LoadedEmission::<T>::insert(netuid, concat_emission_tuples);

// --- 11 Set counters.
Self::set_blocks_since_last_step(netuid, 0);
Self::set_last_mechanism_step_block(netuid, block_number);
}
}
/// Distributes token inflation through the hotkey based on emission. The call ensures that the inflation
/// is distributed onto the accounts in proportion of the stake delegated minus the take. This function
/// is called after an epoch to distribute the newly minted stake according to delegation.
///
pub fn emit_inflation_through_hotkey_account(
    hotkey: &T::AccountId,
    server_emission: u64,
    validator_emission: u64,
) {
    // --- 1. Check if the hotkey is a delegate. If not, we simply pass the stake through to the
    // coldkey - hotkey account as normal. Saturating add guards against overflow when the two
    // emission halves are combined.
    if !Self::hotkey_is_delegate(hotkey) {
        Self::increase_stake_on_hotkey_account(
            hotkey,
            server_emission.saturating_add(validator_emission),
        );
        return;
    }
    // Then this is a delegate, we distribute validator_emission, then server_emission.

    // --- 2. The hotkey is a delegate. We first distribute a proportion of the validator_emission
    // to the hotkey directly as a function of its 'take'. Saturating sub: the take is
    // mathematically <= validator_emission, but fixed-point rounding in the take
    // calculation must never be allowed to underflow here.
    let total_hotkey_stake: u64 = Self::get_total_stake_for_hotkey(hotkey);
    let delegate_take: u64 =
        Self::calculate_delegate_proportional_take(hotkey, validator_emission);
    let validator_emission_minus_take: u64 = validator_emission.saturating_sub(delegate_take);
    let mut remaining_validator_emission: u64 = validator_emission_minus_take;

    // --- 3. The remaining emission goes to the owners in proportion to the stake delegated.
    for (owning_coldkey_i, stake_i) in
        <Stake<T> as IterableStorageDoubleMap<T::AccountId, T::AccountId, u64>>::iter_prefix(
            hotkey,
        )
    {
        // --- 4. The emission proportion is remaining_emission * ( stake / total_stake ).
        let stake_proportion: u64 = Self::calculate_stake_proportional_emission(
            stake_i,
            total_hotkey_stake,
            validator_emission_minus_take,
        );
        Self::increase_stake_on_coldkey_hotkey_account(
            &owning_coldkey_i,
            hotkey,
            stake_proportion,
        );
        log::debug!(
            "owning_coldkey_i: {:?} hotkey: {:?} emission: +{:?} ",
            owning_coldkey_i,
            hotkey,
            stake_proportion
        );
        // Saturating sub so per-delegator rounding can never underflow the remainder.
        remaining_validator_emission =
            remaining_validator_emission.saturating_sub(stake_proportion);
    }

    // --- 5. Last increase final account balance of delegate after 4, since 5 will change the
    // stake proportion of the delegate and effect calculation in 4.
    Self::increase_stake_on_hotkey_account(
        hotkey,
        delegate_take.saturating_add(remaining_validator_emission),
    );
    log::debug!("delkey: {:?} delegate_take: +{:?} ", hotkey, delegate_take);
    // Also emit the server_emission to the hotkey.
    // The server emission is distributed in-full to the delegate owner.
    // We do this after 4. for the same reason as above.
    Self::increase_stake_on_hotkey_account(hotkey, server_emission);
}

/// Increases the stake on the cold - hot pairing by increment while also incrementing other counters.
/// This function should be called rather than set_stake under account.
///
pub fn block_step_increase_stake_on_coldkey_hotkey_account(
    coldkey: &T::AccountId,
    hotkey: &T::AccountId,
    increment: u64,
) {
    // BUG FIX: the previous closure computed `old.saturating_add(increment)` but never
    // wrote the result back through the `&mut` reference, so `TotalColdkeyStake` was
    // silently left unchanged. Assign through the reference so the mutation persists.
    TotalColdkeyStake::<T>::mutate(coldkey, |old| *old = old.saturating_add(increment));
    TotalHotkeyStake::<T>::insert(
        hotkey,
        TotalHotkeyStake::<T>::get(hotkey).saturating_add(increment),
    );
    Stake::<T>::insert(
        hotkey,
        coldkey,
        Stake::<T>::get(hotkey, coldkey).saturating_add(increment),
    );
    TotalStake::<T>::put(TotalStake::<T>::get().saturating_add(increment));
}

/// Decreases the stake on the cold - hot pairing by the decrement while decreasing other counters.
///
pub fn block_step_decrease_stake_on_coldkey_hotkey_account(
    coldkey: &T::AccountId,
    hotkey: &T::AccountId,
    decrement: u64,
) {
    // BUG FIX: the previous closure computed `old.saturating_sub(decrement)` but never
    // wrote the result back through the `&mut` reference, so `TotalColdkeyStake` was
    // silently left unchanged. Assign through the reference so the mutation persists.
    TotalColdkeyStake::<T>::mutate(coldkey, |old| *old = old.saturating_sub(decrement));
    TotalHotkeyStake::<T>::insert(
        hotkey,
        TotalHotkeyStake::<T>::get(hotkey).saturating_sub(decrement),
    );
    Stake::<T>::insert(
        hotkey,
        coldkey,
        Stake::<T>::get(hotkey, coldkey).saturating_sub(decrement),
    );
    TotalStake::<T>::put(TotalStake::<T>::get().saturating_sub(decrement));
}

/// Returns emission awarded to a hotkey as a function of its proportion of the total stake,
/// i.e. `emission * (stake / total_stake)`, truncated to u64. Returns 0 when there is no
/// stake at all.
///
pub fn calculate_stake_proportional_emission(
    stake: u64,
    total_stake: u64,
    emission: u64,
) -> u64 {
    // No stake anywhere: nothing can be proportionally awarded (also avoids div-by-zero).
    if total_stake == 0 {
        return 0;
    };
    // Use saturating fixed-point operations, consistent with the safe math used
    // elsewhere in this pallet, so extreme values clamp instead of wrapping/panicking.
    let stake_proportion: I64F64 =
        I64F64::from_num(stake).saturating_div(I64F64::from_num(total_stake));
    let proportional_emission: I64F64 =
        I64F64::from_num(emission).saturating_mul(stake_proportion);
    proportional_emission.to_num::<u64>()
}

/// Returns the delegated stake 'take' assigned to this key: `emission * (take / u16::MAX)`.
/// Returns 0 when the hotkey is not a delegate.
///
pub fn calculate_delegate_proportional_take(hotkey: &T::AccountId, emission: u64) -> u64 {
    if Self::hotkey_is_delegate(hotkey) {
        // The take is stored as a u16 proportion of u16::MAX, so take_proportion is in [0, 1].
        // Saturating fixed-point ops for consistency with the pallet's safe-math convention.
        let take_proportion: I64F64 = I64F64::from_num(Delegates::<T>::get(hotkey))
            .saturating_div(I64F64::from_num(u16::MAX));
        let take_emission: I64F64 = take_proportion.saturating_mul(I64F64::from_num(emission));
        take_emission.to_num::<u64>()
    } else {
        0
    }
}

/// Adjusts the network difficulties/burns of every active network. Resetting state parameters.
///
pub fn adjust_registration_terms_for_networks() {
Expand Down
Loading