Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Hotfix/dont max upscale bonds #1439

Closed
wants to merge 13 commits into from
21 changes: 11 additions & 10 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

38 changes: 38 additions & 0 deletions pallets/subtensor/src/epoch/math.rs
Original file line number Diff line number Diff line change
@@ -549,6 +549,24 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
});
}

// Zero out masked columns of a dense matrix in place; mask=true means "mask out".
// All rows are assumed to have the same length as the first row.
#[allow(dead_code)]
pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
    // An empty matrix has no columns to mask.
    let Some(first_row) = matrix.first() else {
        return;
    };
    assert_eq!(mask.len(), first_row.len());
    let zero: I32F32 = I32F32::saturating_from_num(0);
    for row in matrix.iter_mut() {
        for (cell, masked) in row.iter_mut().zip(mask) {
            if *masked {
                *cell = zero;
            }
        }
    }
}

// Mask out the diagonal of the input matrix in-place.
#[allow(dead_code)]
pub fn inplace_mask_diag(matrix: &mut [Vec<I32F32>]) {
@@ -674,6 +692,26 @@ pub fn vec_mask_sparse_matrix(
result
}

// Remove cells from a sparse matrix where `mask_fn(scalar, vector[col])` is true.
//
// A cell whose column index is out of range for `vector` is kept unchanged
// instead of panicking (the previous implementation indexed `vector` directly
// and relied on `clippy::indexing_slicing` being allowed).
#[allow(dead_code)]
pub fn scalar_vec_mask_sparse_matrix(
    sparse_matrix: &[Vec<(u16, I32F32)>],
    scalar: u64,
    vector: &[u64],
    mask_fn: &dyn Fn(u64, u64) -> bool,
) -> Vec<Vec<(u16, I32F32)>> {
    sparse_matrix
        .iter()
        .map(|sparse_row| {
            sparse_row
                .iter()
                .filter(|(j, _)| {
                    // Keep the cell unless the mask predicate fires; a missing
                    // vector entry (out-of-range column) never masks.
                    !vector
                        .get(*j as usize)
                        .map_or(false, |v| mask_fn(scalar, *v))
                })
                .copied()
                .collect()
        })
        .collect()
}

// Row-wise matrix-vector hadamard product.
#[allow(dead_code)]
pub fn row_hadamard(matrix: &[Vec<I32F32>], vector: &[I32F32]) -> Vec<Vec<I32F32>> {
62 changes: 34 additions & 28 deletions pallets/subtensor/src/epoch/run_epoch.rs
Original file line number Diff line number Diff line change
@@ -22,6 +22,10 @@ impl<T: Config> Pallet<T> {
let current_block: u64 = Self::get_current_block_as_u64();
log::trace!("current_block:\n{:?}\n", current_block);

// Get tempo.
let tempo: u64 = Self::get_tempo(netuid).into();
log::trace!("tempo:\n{:?}\n", tempo);

// Get activity cutoff.
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64;
log::trace!("activity_cutoff:\n{:?}\n", activity_cutoff);
@@ -44,7 +48,7 @@ impl<T: Config> Pallet<T> {
let block_at_registration: Vec<u64> = Self::get_block_at_registration(netuid);
log::trace!("Block at registration:\n{:?}\n", &block_at_registration);

// Outdated matrix, updated_ij=True if i has last updated (weights) after j has last registered.
// Outdated matrix, outdated_ij=True if i has last updated (weights) after j has last registered.
let outdated: Vec<Vec<bool>> = last_update
.iter()
.map(|updated| {
@@ -56,6 +60,16 @@ impl<T: Config> Pallet<T> {
.collect();
log::trace!("Outdated:\n{:?}\n", &outdated);

// Recently registered vector, recently_j=True if the last tempo block happened at or before j's last registration.
// Mask if: the last tempo block happened *before* the registration block
// ==> last_tempo <= registered
let last_tempo: u64 = current_block.saturating_sub(tempo);
let recently_registered: Vec<bool> = block_at_registration
.iter()
.map(|registered| last_tempo <= *registered)
.collect();
log::trace!("Recently registered:\n{:?}\n", &recently_registered);

// ===========
// == Stake ==
// ===========
@@ -185,17 +199,16 @@ impl<T: Config> Pallet<T> {

// Access network bonds.
let mut bonds: Vec<Vec<I32F32>> = Self::get_bonds(netuid);
inplace_mask_matrix(&outdated, &mut bonds); // mask outdated bonds
inplace_col_normalize(&mut bonds); // sum_i b_ij = 1
log::trace!("B:\n{:?}\n", &bonds);

// Remove bonds referring to neurons that have registered since last tempo.
inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds

// Compute bonds delta column normalized.
let mut bonds_delta: Vec<Vec<I32F32>> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S
inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1
let bonds_delta: Vec<Vec<I32F32>> = row_hadamard(&weights_for_bonds, &active_stake); // ΔB = W◦S
log::trace!("ΔB:\n{:?}\n", &bonds_delta);

// Compute the Exponential Moving Average (EMA) of bonds.
let mut ema_bonds = Self::compute_ema_bonds(netuid, consensus.clone(), bonds_delta, bonds);
inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1
let ema_bonds = Self::compute_ema_bonds(netuid, consensus.clone(), bonds_delta, bonds);
log::trace!("emaB:\n{:?}\n", &ema_bonds);

// Compute dividends: d_i = SUM(j) b_ij * inc_j
@@ -326,8 +339,6 @@ impl<T: Config> Pallet<T> {
ValidatorTrust::<T>::insert(netuid, cloned_validator_trust);
ValidatorPermit::<T>::insert(netuid, new_validator_permits.clone());

// Column max-upscale EMA bonds for storage: max_i w_ij = 1.
inplace_col_max_upscale(&mut ema_bonds);
new_validator_permits
.iter()
.zip(validator_permits)
@@ -386,6 +397,10 @@ impl<T: Config> Pallet<T> {
let current_block: u64 = Self::get_current_block_as_u64();
log::trace!("current_block: {:?}", current_block);

// Get tempo.
let tempo: u64 = Self::get_tempo(netuid).into();
log::trace!("tempo: {:?}", tempo);

// Get activity cutoff.
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64;
log::trace!("activity_cutoff: {:?}", activity_cutoff);
@@ -548,33 +563,26 @@ impl<T: Config> Pallet<T> {
let mut bonds: Vec<Vec<(u16, I32F32)>> = Self::get_bonds_sparse(netuid);
log::trace!("B: {:?}", &bonds);

// Remove bonds referring to deregistered neurons.
bonds = vec_mask_sparse_matrix(
// Remove bonds referring to neurons that have registered since last tempo.
// Mask if: the last tempo block happened *before* the registration block
// ==> last_tempo <= registered
let last_tempo: u64 = current_block.saturating_sub(tempo);
bonds = scalar_vec_mask_sparse_matrix(
&bonds,
&last_update,
last_tempo,
&block_at_registration,
&|updated, registered| updated <= registered,
&|last_tempo, registered| last_tempo <= registered,
);
log::trace!("B (outdatedmask): {:?}", &bonds);

// Normalize remaining bonds: sum_i b_ij = 1.
inplace_col_normalize_sparse(&mut bonds, n);
log::trace!("B (mask+norm): {:?}", &bonds);

// Compute bonds delta column normalized.
let mut bonds_delta: Vec<Vec<(u16, I32F32)>> =
let bonds_delta: Vec<Vec<(u16, I32F32)>> =
row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked)
log::trace!("ΔB: {:?}", &bonds_delta);

// Normalize bonds delta.
inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1
log::trace!("ΔB (norm): {:?}", &bonds_delta);

// Compute the Exponential Moving Average (EMA) of bonds.
let mut ema_bonds =
let ema_bonds =
Self::compute_ema_bonds_sparse(netuid, consensus.clone(), bonds_delta, bonds);
// Normalize EMA bonds.
inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1
log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds);

// Compute dividends: d_i = SUM(j) b_ij * inc_j.
@@ -712,8 +720,6 @@ impl<T: Config> Pallet<T> {
ValidatorTrust::<T>::insert(netuid, cloned_validator_trust);
ValidatorPermit::<T>::insert(netuid, new_validator_permits.clone());

// Column max-upscale EMA bonds for storage: max_i w_ij = 1.
inplace_col_max_upscale_sparse(&mut ema_bonds, n);
new_validator_permits
.iter()
.zip(validator_permits)
4 changes: 3 additions & 1 deletion pallets/subtensor/src/macros/hooks.rs
Original file line number Diff line number Diff line change
@@ -81,7 +81,9 @@ mod hooks {
// Remove Stake map entries
.saturating_add(migrations::migrate_remove_stake_map::migrate_remove_stake_map::<T>())
// Remove unused maps entries
.saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::<T>());
.saturating_add(migrations::migrate_remove_unused_maps_and_values::migrate_remove_unused_maps_and_values::<T>())
// Reset Bonds
.saturating_add(migrations::migrate_reset_bonds::migrate_reset_bonds::<T>());
weight
}

54 changes: 54 additions & 0 deletions pallets/subtensor/src/migrations/migrate_reset_bonds.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
use super::*;
use frame_support::weights::Weight;
use log;
use scale_info::prelude::string::String;

/// One-shot migration that wipes the entire `Bonds` storage map.
///
/// Guarded by `HasMigrationRun` so it executes at most once; returns the
/// total DB weight consumed (reads for the flag check plus reads/writes
/// reported by each `clear` pass).
pub fn migrate_reset_bonds<T: Config>() -> Weight {
    use frame_support::traits::Get;
    let migration_name = b"migrate_reset_bonds".to_vec();

    // One read to consult the migration flag.
    let mut weight = T::DbWeight::get().reads(1);

    // Bail out early if this migration already ran.
    if HasMigrationRun::<T>::get(&migration_name) {
        log::info!(
            target: "runtime",
            "Migration '{:?}' has already run. Skipping.",
            String::from_utf8_lossy(&migration_name)
        );
        return weight;
    }

    log::info!(
        target: "runtime",
        "Running migration '{}'",
        String::from_utf8_lossy(&migration_name)
    );

    // ===== Migration Body =====
    // Drain the Bonds map. `clear` may stop early and hand back a cursor, so
    // keep clearing until no cursor remains, accounting weight per pass.
    let mut cursor: Option<_> = None;
    loop {
        let removal = Bonds::<T>::clear(u32::MAX, cursor.as_deref());
        weight = weight.saturating_add(
            T::DbWeight::get().reads_writes(removal.loops as u64, removal.unique as u64),
        );
        cursor = removal.maybe_cursor;
        if cursor.is_none() {
            break;
        }
    }

    // ===== Migration End =====
    // -----------------------------
    // Mark the migration as done
    // -----------------------------
    HasMigrationRun::<T>::insert(&migration_name, true);
    weight = weight.saturating_add(T::DbWeight::get().writes(1));

    log::info!(
        target: "runtime",
        "Migration '{}' completed successfully.",
        String::from_utf8_lossy(&migration_name)
    );

    weight
}
1 change: 1 addition & 0 deletions pallets/subtensor/src/migrations/mod.rs
Original file line number Diff line number Diff line change
@@ -11,6 +11,7 @@ pub mod migrate_populate_owned_hotkeys;
pub mod migrate_rao;
pub mod migrate_remove_stake_map;
pub mod migrate_remove_unused_maps_and_values;
pub mod migrate_reset_bonds;
pub mod migrate_set_min_burn;
pub mod migrate_set_min_difficulty;
pub mod migrate_stake_threshold;
1 change: 1 addition & 0 deletions pallets/subtensor/src/subnets/uids.rs
Original file line number Diff line number Diff line change
@@ -23,6 +23,7 @@ impl<T: Config> Pallet<T> {
Consensus::<T>::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0));
Incentive::<T>::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0));
Dividends::<T>::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0));
Bonds::<T>::remove(netuid, neuron_uid); // Remove bonds for Validator.
}

/// Replace the neuron under this uid.
39 changes: 39 additions & 0 deletions pallets/subtensor/src/tests/math.rs
Original file line number Diff line number Diff line change
@@ -1220,6 +1220,45 @@ fn test_math_vec_mask_sparse_matrix() {
);
}

#[test]
fn test_math_scalar_vec_mask_sparse_matrix() {
    let epsilon = I32F32::from_num(0);
    let input: Vec<f32> = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.];
    let masking_vector: Vec<u64> = vec![1, 4, 7];

    // Equality predicate: masks column 0 only (vector entry 1 == scalar 1).
    let mat = vec_to_sparse_mat_fixed(&input, 3, false);
    let result = scalar_vec_mask_sparse_matrix(&mat, 1, &masking_vector, &|a, b| a == b);
    let expected: Vec<f32> = vec![0., 2., 3., 0., 5., 6., 0., 8., 9.];
    assert_sparse_mat_compare(&result, &vec_to_sparse_mat_fixed(&expected, 3, false), epsilon);

    // "<=" predicate with scalar 5: masks column 2 only (5 <= 7).
    let mat = vec_to_sparse_mat_fixed(&input, 3, false);
    let result = scalar_vec_mask_sparse_matrix(&mat, 5, &masking_vector, &|a, b| a <= b);
    let expected: Vec<f32> = vec![1., 2., 0., 4., 5., 0., 7., 8., 0.];
    assert_sparse_mat_compare(&result, &vec_to_sparse_mat_fixed(&expected, 3, false), epsilon);

    // ">=" predicate with scalar 5: masks columns 0 and 1 (5 >= 1, 5 >= 4).
    let mat = vec_to_sparse_mat_fixed(&input, 3, false);
    let result = scalar_vec_mask_sparse_matrix(&mat, 5, &masking_vector, &|a, b| a >= b);
    let expected: Vec<f32> = vec![0., 0., 3., 0., 0., 6., 0., 0., 9.];
    assert_sparse_mat_compare(&result, &vec_to_sparse_mat_fixed(&expected, 3, false), epsilon);
}

#[test]
fn test_math_row_hadamard() {
let vector: Vec<I32F32> = vec_to_fixed(&[1., 2., 3., 4.]);
Loading
Oops, something went wrong.
Loading
Oops, something went wrong.