Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
103 changes: 102 additions & 1 deletion pallets/subtensor/src/math.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,30 @@ pub fn vec_max_upscale_to_u16( vec: &Vec<I32F32> ) -> Vec<u16> {
}
}

#[allow(dead_code)]
// Max-upscale a u16 vector so that its maximum element becomes u16::MAX.
// Widens each element to fixed-point and delegates to the I32F32 implementation.
pub fn vec_u16_max_upscale_to_u16( vec: &Vec<u16> ) -> Vec<u16> {
    let widened: Vec<I32F32> = vec.iter().copied().map( I32F32::from_num ).collect();
    vec_max_upscale_to_u16( &widened )
}

#[allow(dead_code)]
// Checks if u16 vector, when normalized, has a max value not greater than a u16 ratio max_limit.
pub fn check_vec_max_limited( vec: &Vec<u16>, max_limit: u16 ) -> bool {
    // Interpret max_limit as a ratio of u16::MAX in fixed-point.
    let limit_ratio: I32F32 = I32F32::from_num( max_limit ) / I32F32::from_num( u16::MAX );
    // Widen to fixed-point and normalize in place before comparing against the ratio.
    let mut normalized: Vec<I32F32> = vec.iter().map( |&e| I32F32::from_num( e ) ).collect();
    inplace_normalize( &mut normalized );
    // An empty vector has no maximum and trivially satisfies the limit.
    normalized.iter().max().map_or( true, |largest| *largest <= limit_ratio )
}

#[allow(dead_code)]
// Returns the sum of all elements of a fixed-point vector via `Iterator::sum`.
pub fn sum( x: &Vec<I32F32> ) -> I32F32 { x.iter().sum() }

Expand Down Expand Up @@ -804,7 +828,7 @@ mod tests {
assert_float_compare_64(va[i], vb[i], epsilon);
}
}

fn assert_vec_compare_u16(va: &Vec<u16>, vb: &Vec<u16>) {
assert!(va.len() == vb.len());
for i in 0..va.len(){
Expand Down Expand Up @@ -909,6 +933,83 @@ mod tests {
assert_vec_compare_u16(&result, &target);
}

#[test]
fn test_vec_u16_max_upscale_to_u16() {
    // Each case pairs an input vector with its expected max-upscaled output.
    let cases: Vec<(Vec<u16>, Vec<u16>)> = vec![
        ( vec![], vec![] ),
        ( vec![ 0 ], vec![ 0 ] ),
        ( vec![ 0, 0 ], vec![ 0, 0 ] ),
        ( vec![ 1 ], vec![ 65535 ] ),
        ( vec![ 0, 1 ], vec![ 0, 65535 ] ),
        ( vec![ 65534 ], vec![ 65535 ] ),
        ( vec![ 65535 ], vec![ 65535 ] ),
        ( vec![ 65535, 65535 ], vec![ 65535, 65535 ] ),
        ( vec![ 0, 1, 65534 ], vec![ 0, 1, 65535 ] ),
        // Vector already containing u16::MAX must pass through unchanged.
        ( vec![ 0, 1, 2, 3, 4, 65533, 65535 ], vec![ 0, 1, 2, 3, 4, 65533, 65535 ] ),
    ];
    for ( input, expected ) in cases {
        let result: Vec<u16> = vec_u16_max_upscale_to_u16( &input );
        assert_vec_compare_u16( &result, &expected );
    }
}

#[test]
fn test_check_vec_max_limited() {
    // (vector, max_limit, expected) triples covering empty, boundary and ratio cases.
    let cases: Vec<(Vec<u16>, u16, bool)> = vec![
        ( vec![], 0, true ),
        ( vec![], u16::MAX, true ),
        ( vec![ u16::MAX ], u16::MAX, true ),
        ( vec![ u16::MAX ], u16::MAX - 1, false ),
        ( vec![ u16::MAX ], 0, false ),
        ( vec![ 0 ], u16::MAX, true ),
        ( vec![ 0, u16::MAX ], u16::MAX, true ),
        // Two equal maxima normalize to 1/2 each: limit must be strictly above MAX/2.
        ( vec![ 0, u16::MAX, u16::MAX ], u16::MAX / 2, false ),
        ( vec![ 0, u16::MAX, u16::MAX ], u16::MAX / 2 + 1, true ),
        // Three equal maxima normalize to 1/3 each.
        ( vec![ 0, u16::MAX, u16::MAX, u16::MAX ], u16::MAX / 3 - 1, false ),
        ( vec![ 0, u16::MAX, u16::MAX, u16::MAX ], u16::MAX / 3, true ),
    ];
    for ( vector, max_limit, expected ) in cases {
        assert_eq!( check_vec_max_limited( &vector, max_limit ), expected );
    }
}

#[test]
fn test_math_fixed_overflow() {
let max_32: I32F32 = I32F32::max_value();
Expand Down
15 changes: 6 additions & 9 deletions pallets/subtensor/src/weights.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use super::*;
use crate::math::*;
use frame_support::sp_std::vec;
use sp_std::vec::Vec;

Expand Down Expand Up @@ -102,15 +103,15 @@ impl<T: Config> Pallet<T> {
// --- 12. Ensure that the weights have the required length.
ensure!( Self::check_length( netuid, neuron_uid, &uids, &values ), Error::<T>::NotSettingEnoughWeights );

// --- 13. Normalize the weights.
let normalized_values = Self::normalize_weights( values );
// --- 13. Max-upscale the weights.
let max_upscaled_weights: Vec<u16> = vec_u16_max_upscale_to_u16( &values );

// --- 14. Ensure the weights are max weight limited
ensure!( Self::max_weight_limited( netuid, neuron_uid, &uids, &normalized_values ), Error::<T>::MaxWeightExceeded );
ensure!( Self::max_weight_limited( netuid, neuron_uid, &uids, &max_upscaled_weights ), Error::<T>::MaxWeightExceeded );

// --- 15. Zip weights for sinking to storage map.
let mut zipped_weights: Vec<( u16, u16 )> = vec![];
for ( uid, val ) in uids.iter().zip(normalized_values.iter()) { zipped_weights.push((*uid, *val)) }
for ( uid, val ) in uids.iter().zip(max_upscaled_weights.iter()) { zipped_weights.push((*uid, *val)) }

// --- 16. Set weights under netuid, uid double map entry.
Weights::<T>::insert( netuid, neuron_uid, zipped_weights );
Expand Down Expand Up @@ -221,11 +222,7 @@ impl<T: Config> Pallet<T> {
if max_weight_limit == u16::MAX { return true; }

// Check if the weights max value is less than or equal to the limit.
let max: u16 = *weights.iter().max().unwrap();
if max <= max_weight_limit { return true; }

// The check has failed.
return false;
check_vec_max_limited( weights, max_weight_limit)
}

// Returns true if the uids and weights correspond to a self weight on the uid.
Expand Down
4 changes: 2 additions & 2 deletions pallets/subtensor/tests/epoch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ fn init_run_epochs(netuid: u16, n: u16, validators: &Vec<u16>, servers: &Vec<u16
else {
stake = if validators.contains(&key) { stake_per_validator } else { 0 }; // only validators receive stake
}
// let stake: u128 = 1; // alternative test: all nodes receive stake, should be same outcome, except stake
// let stake: u64 = 1; // alternative test: all nodes receive stake, should be same outcome, except stake
SubtensorModule::add_balance_to_coldkey_account( &(key as u64), stake );
SubtensorModule::append_neuron( netuid, &(key as u64), 0 );
SubtensorModule::increase_stake_on_coldkey_hotkey_account( &(key as u64), &(key as u64), stake as u64 );
Expand Down Expand Up @@ -1007,7 +1007,7 @@ fn test_validator_permits() {

// === Increase server stake above validators
for server in &servers {
SubtensorModule::add_balance_to_coldkey_account( &(*server as u64), 2*network_n as u64 );
SubtensorModule::add_balance_to_coldkey_account( &(*server as u64), 2 * network_n as u64 );
SubtensorModule::increase_stake_on_coldkey_hotkey_account( &(*server as u64), &(*server as u64), 2*network_n as u64 );
}

Expand Down
10 changes: 3 additions & 7 deletions pallets/subtensor/tests/weights.rs
Original file line number Diff line number Diff line change
Expand Up @@ -383,12 +383,8 @@ fn test_set_weights_sum_larger_than_u16_max() {
assert_ok!(result);

let all_weights: Vec<Vec<I32F32>> = SubtensorModule::get_weights(netuid);
let weights_set: Vec<u16> = all_weights[neuron_uid as usize].iter().map(|x| x.to_bits() as u16).collect();

// Should sum less than u16 max.
assert!(weights_set.iter().map(|x| *x as u64).sum::<u64>() <= (u16::MAX as u64) );

// Should be normalized to 50% each.
assert_eq!(weights_set, vec![u16::MAX/2, u16::MAX/2]);
let weights_set: &Vec<I32F32> = &all_weights[neuron_uid as usize];
assert_eq!( weights_set[0], I32F32::from_num(1) );
assert_eq!( weights_set[1], I32F32::from_num(1) );
});
}
2 changes: 1 addition & 1 deletion runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
// `spec_version`, and `authoring_version` are the same between Wasm and native.
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
// the compatible custom types.
spec_version: 116,
spec_version: 117,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
Expand Down