From 37f7b284d308e550affe8e4b1234108804c1cb33 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 16:59:38 -0400 Subject: [PATCH 1/9] Cleanup math: Remove recursion, indexing, assertions, and zipping. Fix get_last_update. --- pallets/subtensor/src/epoch/math.rs | 1325 ++++++++++---------- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- pallets/subtensor/src/subnets/mechanism.rs | 9 + pallets/subtensor/src/tests/math.rs | 183 +-- pallets/subtensor/src/utils/misc.rs | 12 +- 5 files changed, 653 insertions(+), 878 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 11930bf26e..6288ac14ae 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -1,96 +1,79 @@ // we get a compiler warning for this , even though the trait is used in the // quantile function. use crate::alloc::borrow::ToOwned; -#[allow(unused)] -use num_traits::float::Float; use safe_math::*; -use sp_runtime::traits::{CheckedAdd, Saturating}; -use sp_std::cmp::Ordering; +use sp_runtime::traits::CheckedAdd; use sp_std::vec; use substrate_fixed::transcendental::{exp, ln}; use substrate_fixed::types::{I32F32, I64F64}; -// TODO: figure out what cfg gate this needs to not be a warning in rustc -#[allow(unused)] use sp_std::vec::Vec; -#[allow(dead_code)] +pub fn get_safe(slice: &[T], idx: usize) -> T { + slice.get(idx).copied().unwrap_or_default() +} + pub fn fixed(val: f32) -> I32F32 { I32F32::saturating_from_num(val) } -#[allow(dead_code)] pub fn fixed_to_u16(x: I32F32) -> u16 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed_to_u64(x: I32F32) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_u64(x: I64F64) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_fixed32(x: I64F64) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn fixed32_to_fixed64(x: I32F32) -> I64F64 { I64F64::saturating_from_num(x) } -#[allow(dead_code)] pub 
fn u16_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn u16_proportion_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x).safe_div(I32F32::saturating_from_num(u16::MAX)) } -#[allow(dead_code)] pub fn fixed_to_fixed_u16_proportion(x: I32F32) -> I32F32 { x.safe_div(I32F32::saturating_from_num(u16::MAX)) } -#[allow(dead_code)] pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { fixed_to_u16(x.saturating_mul(I32F32::saturating_from_num(u16::MAX))) } -#[allow(dead_code)] pub fn vec_fixed32_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_fixed32(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_fixed32).collect() } -#[allow(dead_code)] pub fn vec_fixed32_to_fixed64(vec: Vec) -> Vec { vec.into_iter().map(fixed32_to_fixed64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed_proportions_to_u16(vec: Vec) -> Vec { vec.into_iter().map(fixed_proportion_to_u16).collect() } -#[allow(dead_code)] // Max-upscale vector and convert to u16 so max_value = u16::MAX. Assumes non-negative normalized input. pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { let u16_max: I32F32 = I32F32::saturating_from_num(u16::MAX); @@ -136,7 +119,6 @@ pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { } } -#[allow(dead_code)] // Max-upscale u16 vector and convert to u16 so max_value = u16::MAX. Assumes u16 vector input. pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { let vec_fixed: Vec = vec @@ -146,7 +128,6 @@ pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { vec_max_upscale_to_u16(&vec_fixed) } -#[allow(dead_code)] // Checks if u16 vector, when normalized, has a max value not greater than a u16 ratio max_limit. 
pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { let max_limit_fixed: I32F32 = @@ -160,12 +141,10 @@ pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { max_value.is_none_or(|v| *v <= max_limit_fixed) } -#[allow(dead_code)] pub fn sum(x: &[I32F32]) -> I32F32 { x.iter().sum() } -#[allow(dead_code)] // Sums a Vector of type that has CheckedAdd trait. // Returns None if overflow occurs during sum using T::checked_add. // Returns Some(T::default()) if input vector is empty. @@ -184,14 +163,12 @@ where } // Return true when vector sum is zero. -#[allow(dead_code)] pub fn is_zero(vector: &[I32F32]) -> bool { let vector_sum: I32F32 = sum(vector); vector_sum == I32F32::saturating_from_num(0) } // Exp safe function with I32F32 output of I32F32 input. -#[allow(dead_code)] pub fn exp_safe(input: I32F32) -> I32F32 { let min_input: I32F32 = I32F32::saturating_from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 let max_input: I32F32 = I32F32::saturating_from_num(20); // <= exp(20) = 485 165 195,4097903 @@ -218,7 +195,6 @@ pub fn exp_safe(input: I32F32) -> I32F32 { } // Sigmoid safe function with I32F32 output of I32F32 input with offset kappa and (recommended) scaling 0 < rho <= 40. -#[allow(dead_code)] pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { let one: I32F32 = I32F32::saturating_from_num(1); let offset: I32F32 = input.saturating_sub(kappa); // (input - kappa) @@ -231,7 +207,6 @@ pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { } // Returns a bool vector where an item is true if the vector item is in topk values. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vec![true; n]; @@ -239,15 +214,16 @@ pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; + } } result } // Returns a bool vector where an item is true if the vector item is in topk values and is non-zero. -#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vector.iter().map(|&elem| elem != I32F32::from(0)).collect(); @@ -255,15 +231,16 @@ pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort (no indexing) for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; // no indexing + } } result } // Returns a normalized (sum to 1 except 0) copy of the input vector. -#[allow(dead_code)] pub fn normalize(x: &[I32F32]) -> Vec { let x_sum: I32F32 = sum(x); if x_sum != I32F32::saturating_from_num(0.0_f32) { @@ -274,7 +251,6 @@ pub fn normalize(x: &[I32F32]) -> Vec { } // Normalizes (sum to 1 except 0) the input vector directly in-place. 
-#[allow(dead_code)] pub fn inplace_normalize(x: &mut [I32F32]) { let x_sum: I32F32 = x.iter().sum(); if x_sum == I32F32::saturating_from_num(0.0_f32) { @@ -285,7 +261,6 @@ pub fn inplace_normalize(x: &mut [I32F32]) { } // Normalizes (sum to 1 except 0) the input vector directly in-place, using the sum arg. -#[allow(dead_code)] pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { if x_sum == I32F32::saturating_from_num(0.0_f32) { return; @@ -295,7 +270,6 @@ pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { } // Normalizes (sum to 1 except 0) the I64F64 input vector directly in-place. -#[allow(dead_code)] pub fn inplace_normalize_64(x: &mut [I64F64]) { let x_sum: I64F64 = x.iter().sum(); if x_sum == I64F64::saturating_from_num(0) { @@ -306,7 +280,6 @@ pub fn inplace_normalize_64(x: &mut [I64F64]) { } /// Normalizes (sum to 1 except 0) each row (dim=0) of a I64F64 matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_64(x: &mut [Vec]) { for row in x { let row_sum: I64F64 = row.iter().sum(); @@ -318,23 +291,18 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { } /// Returns x / y for input vectors x and y, if y == 0 return 0. -#[allow(dead_code)] pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { - assert_eq!(x.len(), y.len()); - x.iter() - .zip(y) - .map(|(x_i, y_i)| { - if *y_i != 0 { - x_i.safe_div(*y_i) - } else { - I32F32::saturating_from_num(0) - } - }) - .collect() + let zero = I32F32::saturating_from_num(0); + + let mut out = Vec::with_capacity(x.len()); + for (i, x_i) in x.iter().enumerate() { + let y_i = y.get(i).copied().unwrap_or(zero); + out.push(x_i.safe_div(y_i)); + } + out } // Normalizes (sum to 1 except 0) each row (dim=0) of a matrix in-place. 
-#[allow(dead_code)] pub fn inplace_row_normalize(x: &mut [Vec]) { for row in x { let row_sum: I32F32 = row.iter().sum(); @@ -346,7 +314,6 @@ pub fn inplace_row_normalize(x: &mut [Vec]) { } // Normalizes (sum to 1 except 0) each row (dim=0) of a sparse matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { for sparse_row in sparse_matrix.iter_mut() { let row_sum: I32F32 = sparse_row.iter().map(|(_j, value)| *value).sum(); @@ -359,7 +326,6 @@ pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { } // Sum across each row (dim=0) of a matrix. -#[allow(dead_code)] pub fn row_sum(x: &[Vec]) -> Vec { if let Some(first_row) = x.first() { if first_row.is_empty() { @@ -370,7 +336,6 @@ pub fn row_sum(x: &[Vec]) -> Vec { } // Sum across each row (dim=0) of a sparse matrix. -#[allow(dead_code)] pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { sparse_matrix .iter() @@ -378,213 +343,205 @@ pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { .collect() } -// Sum across each column (dim=1) of a matrix. -#[allow(dead_code)] -pub fn col_sum(x: &[Vec]) -> Vec { - let Some(first_row) = x.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; - } - x.iter().fold( - vec![I32F32::saturating_from_num(0); cols], - |acc, next_row| { - acc.into_iter() - .zip(next_row) - .map(|(acc_elem, next_elem)| acc_elem.saturating_add(*next_elem)) - .collect() - }, - ) -} - -// Sum across each column (dim=1) of a sparse matrix. 
-#[allow(dead_code, clippy::indexing_slicing)] -pub fn col_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>], columns: u16) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0); columns as usize]; - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - result[*j as usize] = result[*j as usize].saturating_add(*value); - } - } - result -} - // Normalizes (sum to 1 except 0) each column (dim=1) of a sparse matrix in-place. -#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_sum: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_sum: Vec = vec![zero; columns as usize]; + + // Pass 1: accumulate column sums. for sparse_row in sparse_matrix.iter() { - for (j, value) in sparse_row.iter() { - col_sum[*j as usize] = col_sum[*j as usize].saturating_add(*value); + for &(j, value) in sparse_row.iter() { + if let Some(sum) = col_sum.get_mut(j as usize) { + *sum = sum.saturating_add(value); + } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_sum[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: normalize by column sums where non-zero. + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let denom = col_sum.get(*j as usize).copied().unwrap_or(zero); + if denom != zero { + *value = value.safe_div(denom); } - *value = value.safe_div(col_sum[*j as usize]); } } } // Normalizes (sum to 1 except 0) each column (dim=1) of a matrix in-place. -#[allow(dead_code)] +// If a row is shorter/longer than the accumulator, pad with zeroes accordingly. 
pub fn inplace_col_normalize(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Build column sums; treat missing entries as zero, but don't modify rows. + let mut col_sums: Vec = Vec::new(); + for row in x.iter() { + if col_sums.len() < row.len() { + col_sums.resize(row.len(), zero); + } + let mut sums_it = col_sums.iter_mut(); + for v in row.iter() { + if let Some(sum) = sums_it.next() { + *sum = sum.saturating_add(*v); + } else { + break; + } + } + } + + if col_sums.is_empty() { return; } - let cols = first_row.len(); - let col_sums = x - .iter_mut() - .fold(vec![I32F32::saturating_from_num(0.0); cols], |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(&mut m_val, acc_val)| acc_val.saturating_add(m_val)) - .collect() - }); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_sums) - .filter(|(_, col_sum)| **col_sum != I32F32::saturating_from_num(0_f32)) - .for_each(|(m_val, col_sum)| { - *m_val = m_val.safe_div(*col_sum); - }); - }); + + // Normalize only existing elements in each row. + for row in x.iter_mut() { + let mut sums_it = col_sums.iter(); + for m in row.iter_mut() { + if let Some(sum) = sums_it.next() { + if *sum != zero { + *m = m.safe_div(*sum); + } + } else { + break; + } + } + } } // Max-upscale each column (dim=1) of a sparse matrix in-place. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_max: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_max: Vec = vec![zero; columns as usize]; + + // Pass 1: compute per-column max for sparse_row in sparse_matrix.iter() { for (j, value) in sparse_row.iter() { - if col_max[*j as usize] < *value { - col_max[*j as usize] = *value; + if let Some(m) = col_max.get_mut(*j as usize) { + if *m < *value { + *m = *value; + } } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_max[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: divide each nonzero entry by its column max + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let m = col_max.get(*j as usize).copied().unwrap_or(zero); + if m != zero { + *value = value.safe_div(m); } - *value = value.safe_div(col_max[*j as usize]); } } } // Max-upscale each column (dim=1) of a matrix in-place. -#[allow(dead_code)] pub fn inplace_col_max_upscale(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Find the widest row to size the column-max buffer; don't modify rows. 
+ let max_cols = x.iter().map(|r| r.len()).max().unwrap_or(0); + if max_cols == 0 { return; } - let cols = first_row.len(); - let col_maxes = x.iter_mut().fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(m_val, acc_val)| acc_val.max(*m_val)) - .collect() - }, - ); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_maxes) - .filter(|(_, col_max)| **col_max != I32F32::saturating_from_num(0)) - .for_each(|(m_val, col_max)| { - *m_val = m_val.safe_div(*col_max); - }); - }); + + // Pass 1: compute per-column maxima across existing entries only. + let mut col_maxes = vec![zero; max_cols]; + for row in x.iter() { + let mut max_it = col_maxes.iter_mut(); + for v in row.iter() { + if let Some(m) = max_it.next() { + if *m < *v { + *m = *v; + } + } else { + break; + } + } + } + + // Pass 2: divide each existing entry by its column max (if non-zero). + for row in x.iter_mut() { + let mut max_it = col_maxes.iter(); + for val in row.iter_mut() { + if let Some(&m) = max_it.next() { + if m != zero { + *val = val.safe_div(m); + } + } else { + break; + } + } + } } // Apply mask to vector, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { if mask.is_empty() { return; } - assert_eq!(mask.len(), vector.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter() - .zip(vector) - .filter(|(m, _)| **m) - .for_each(|(_, v_elem)| { - *v_elem = zero; - }); + for (i, v) in vector.iter_mut().enumerate() { + if *mask.get(i).unwrap_or(&true) { + *v = zero; + } + } } // Apply mask to matrix, mask=true will mask out, i.e. set to 0. 
-#[allow(dead_code)] -pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut Vec>) { +pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { let Some(first_row) = mask.first() else { return; }; if first_row.is_empty() { return; } - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter().zip(matrix).for_each(|(mask_row, matrix_row)| { - mask_row - .iter() - .zip(matrix_row) - .filter(|(mask_elem, _)| **mask_elem) - .for_each(|(_, matrix_elem)| { - *matrix_elem = zero; - }); - }); + for (r, row) in matrix.iter_mut().enumerate() { + let mask_row_opt = mask.get(r); + for (c, val) in row.iter_mut().enumerate() { + let should_zero = mask_row_opt + .and_then(|mr| mr.get(c)) + .copied() + .unwrap_or(true); + if should_zero { + *val = zero; + } + } + } } // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { let Some(first_row) = matrix.first() else { return; }; let cols = first_row.len(); - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix - .iter_mut() - .zip(mask) - .for_each(|(row_elem, mask_row)| { - if *mask_row { - *row_elem = vec![zero; cols]; - } - }); + for (r, row) in matrix.iter_mut().enumerate() { + if mask.get(r).copied().unwrap_or(true) { + *row = vec![zero; cols]; + } + } } // Apply column mask to matrix, mask=true will mask out, i.e. set to 0. // Assumes each column has the same length. 
-#[allow(dead_code)] pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { - let Some(first_row) = matrix.first() else { + if matrix.is_empty() { return; }; - assert_eq!(mask.len(), first_row.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix.iter_mut().for_each(|row_elem| { - row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { - if *mask_col { + for row in matrix.iter_mut() { + for (c, elem) in row.iter_mut().enumerate() { + if mask.get(c).copied().unwrap_or(true) { *elem = zero; } - }); - }); + } + } } // Mask out the diagonal of the input matrix in-place. -#[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut [Vec]) { let Some(first_row) = matrix.first() else { return; @@ -592,7 +549,18 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { if first_row.is_empty() { return; } - assert_eq!(matrix.len(), first_row.len()); + // Weights that we use this function for are always a square matrix. + // If something not square is passed to this function, it's safe to return + // with no action. Log error if this happens. + if matrix.len() != first_row.len() { + log::error!( + "inplace_mask_diag: matrix.len {:?} != first_row.len {:?}", + matrix.len(), + first_row.len() + ); + return; + } + let zero: I32F32 = I32F32::saturating_from_num(0.0); matrix.iter_mut().enumerate().for_each(|(idx, row)| { let Some(elem) = row.get_mut(idx) else { @@ -604,27 +572,29 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { } // Remove cells from sparse matrix where the mask function of a scalar and a vector is true. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn scalar_vec_mask_sparse_matrix( sparse_matrix: &[Vec<(u16, I32F32)>], scalar: u64, vector: &[u64], mask_fn: &dyn Fn(u64, u64) -> bool, ) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row { - if !mask_fn(scalar, vector[*j as usize]) { - result[i].push((*j, *value)); + let mut result: Vec> = Vec::with_capacity(sparse_matrix.len()); + + for row in sparse_matrix.iter() { + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, value) in row.iter() { + let vj = vector.get(j as usize).copied().unwrap_or(0); + if !mask_fn(scalar, vj) { + out_row.push((j, value)); } } + result.push(out_row); } + result } // Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index. -#[allow(dead_code)] pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: u16) { let Some(first_row) = matrix.first() else { return; @@ -632,7 +602,10 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: if first_row.is_empty() { return; } - assert_eq!(matrix.len(), first_row.len()); + if matrix.len() != first_row.len() { + log::error!("inplace_mask_diag_except_index: input matrix is not square"); + return; + } let diag_at_index = matrix .get(except_index as usize) @@ -651,26 +624,22 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: } // Return a new sparse matrix that replaces masked rows with an empty vector placeholder. 
-#[allow(dead_code)] pub fn mask_rows_sparse( mask: &[bool], sparse_matrix: &[Vec<(u16, I32F32)>], ) -> Vec> { - assert_eq!(sparse_matrix.len(), mask.len()); - mask.iter() - .zip(sparse_matrix) - .map(|(mask_elem, sparse_row)| { - if *mask_elem { - vec![] - } else { - sparse_row.clone() - } - }) - .collect() + let mut out = Vec::with_capacity(sparse_matrix.len()); + for (i, sparse_row) in sparse_matrix.iter().enumerate() { + if mask.get(i).copied().unwrap_or(true) { + out.push(Vec::new()); + } else { + out.push(sparse_row.clone()); + } + } + out } // Return a new sparse matrix with a masked out diagonal of input sparse matrix. -#[allow(dead_code)] pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec> { sparse_matrix .iter() @@ -687,7 +656,6 @@ pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec], except_index: u16, @@ -709,27 +677,29 @@ pub fn mask_diag_sparse_except_index( } // Remove cells from sparse matrix where the mask function of two vectors is true. -#[allow(dead_code, clippy::indexing_slicing)] pub fn vec_mask_sparse_matrix( sparse_matrix: &[Vec<(u16, I32F32)>], first_vector: &[u64], second_vector: &[u64], mask_fn: &dyn Fn(u64, u64) -> bool, ) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row { - if !mask_fn(first_vector[i], second_vector[*j as usize]) { - result[i].push((*j, *value)); + let mut result: Vec> = Vec::with_capacity(sparse_matrix.len()); + let mut fv_it = first_vector.iter(); + for row in sparse_matrix.iter() { + let fv = fv_it.next().copied().unwrap_or(0); + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let sv = second_vector.get(j as usize).copied().unwrap_or(0); + if !mask_fn(fv, sv) { + out_row.push((j, val)); } } + result.push(out_row); } result } // Row-wise matrix-vector hadamard product. 
-#[allow(dead_code)] pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec> { let Some(first_row) = matrix.first() else { return vec![vec![]]; @@ -737,37 +707,43 @@ pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], ) -> Vec> { - sparse_matrix - .iter() - .zip(vector) - .map(|(sparse_row, vec_val)| { - sparse_row - .iter() - .map(|(j, value)| (*j, value.saturating_mul(*vec_val))) - .collect() - }) - .collect() + let mut out = Vec::with_capacity(sparse_matrix.len()); + let mut vec_it = vector.iter(); + + for sparse_row in sparse_matrix.iter() { + let Some(&scale) = vec_it.next() else { break }; + let mut new_row = Vec::with_capacity(sparse_row.len()); + for &(j, val) in sparse_row.iter() { + new_row.push((j, val.saturating_mul(scale))); + } + out.push(new_row); + } + + out } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. -#[allow(dead_code)] pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -776,52 +752,30 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { if cols == 0 { return vec![]; } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) -} -// Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. 
-#[allow(dead_code)] -pub fn matmul_64(matrix: &[Vec], vector: &[I64F64]) -> Vec { - let Some(first_row) = matrix.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; + let zero = I32F32::saturating_from_num(0.0); + let mut acc = vec![zero; cols]; + + let mut vec_it = vector.iter(); + for row in matrix.iter() { + // Use 0 if the vector ran out (rows beyond vector length contribute nothing). + let scale = vec_it.next().copied().unwrap_or(zero); + + let mut acc_it = acc.iter_mut(); + for m_val in row.iter() { + if let Some(a) = acc_it.next() { + *a = a.saturating_add(scale.saturating_mul(*m_val)); + } else { + // Ignore elements beyond the accumulator width (first row’s length). + break; + } + } } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I64F64::saturating_from_num(0.0); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) + + acc } // Column-wise matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. 
-#[allow(dead_code)] pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -829,143 +783,112 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], columns: u16, ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - result[*j as usize] = - result[*j as usize].saturating_add(vector[i].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; columns as usize]; + + let mut vec_it = vector.iter(); + for row in sparse_matrix.iter() { + let scale = vec_it.next().copied().unwrap_or(zero); + for &(j, val) in row.iter() { + if let Some(r) = result.get_mut(j as usize) { + *r = r.saturating_add(scale.saturating_mul(val)); + } } } + result } // Column-wise sparse_matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn matmul_transpose_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute dividends: d_j = SUM(i) b_ji * inc_i - // result_j = SUM(i) vector_i * matrix_ji - // result_i = SUM(j) vector_j * matrix_ij - result[i] = result[i].saturating_add(vector[*j as usize].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; sparse_matrix.len()]; + + let mut out_it = result.iter_mut(); + for row in sparse_matrix.iter() { + let Some(out_cell) = out_it.next() else { break }; + let mut acc = zero; + for &(j, val) in row.iter() { + let v = vector.get(j as usize).copied().unwrap_or(zero); + acc = acc.saturating_add(v.saturating_mul(val)); } + *out_cell = acc; } + result } // Set inplace matrix values above column threshold to threshold value. -#[allow(dead_code)] pub fn inplace_col_clip(x: &mut [Vec], col_threshold: &[I32F32]) { - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(col_threshold) - .for_each(|(value, threshold)| { - *value = *threshold.min(value); - }); - }); + for row in x.iter_mut() { + let mut thr_it = col_threshold.iter(); + for value in row.iter_mut() { + if let Some(th) = thr_it.next() { + // Clip: value = min(value, threshold) + *value = *th.min(&*value); + } else { + // No more thresholds; stop for this row. + break; + } + } + } } // Return sparse matrix with values above column threshold set to threshold value. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn col_clip_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], col_threshold: &[I32F32], ) -> Vec> { - let mut result: Vec> = vec![vec![]; sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - if col_threshold[*j as usize] < *value { - if 0 < col_threshold[*j as usize] { - result[i].push((*j, col_threshold[*j as usize])); + let zero = I32F32::saturating_from_num(0.0); + let mut result = Vec::with_capacity(sparse_matrix.len()); + + for row in sparse_matrix.iter() { + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let th = col_threshold.get(j as usize).copied().unwrap_or(zero); + if th < val { + if th > zero { + // clip down to threshold, but drop if threshold <= 0 + out_row.push((j, th)); } } else { - result[i].push((*j, *value)); + // keep original + out_row.push((j, val)); } } + result.push(out_row); } - result -} -// Set matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn clip( - x: &[Vec], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - x.iter() - .map(|row| { - row.iter() - .map(|elem| if *elem >= threshold { upper } else { lower }) - .collect() - }) - .collect() -} - -// Set inplace matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn inplace_clip(x: &mut [Vec], threshold: I32F32, upper: I32F32, lower: I32F32) { - x.iter_mut().for_each(|row| { - row.iter_mut().for_each(|elem| { - *elem = if *elem >= threshold { upper } else { lower }; - }); - }); -} - -// Set sparse matrix values below threshold to lower, and equal-above to upper. -// Does not add missing elements (0 value assumed) when lower!=0. 
-#[allow(dead_code)] -pub fn clip_sparse( - sparse_matrix: &[Vec<(u16, I32F32)>], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - sparse_matrix - .iter() - .map(|row| { - row.iter() - .map(|(j, value)| { - if *value < threshold { - (*j, lower) - } else { - (*j, upper) - } - }) - .collect() - }) - .collect() + result } // Stake-weighted median score finding algorithm, based on a mid pivot binary search. @@ -995,144 +918,199 @@ pub fn clip_sparse( // * 'median': ( I32F32 ): // - median via random pivot binary search. // -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median( stake: &[I32F32], score: &[I32F32], partition_idx: &[usize], minority: I32F32, - partition_lo: I32F32, - partition_hi: I32F32, + mut partition_lo: I32F32, + mut partition_hi: I32F32, ) -> I32F32 { - let n = partition_idx.len(); - if n == 0 { - return I32F32::saturating_from_num(0); - } - if n == 1 { - return score[partition_idx[0]]; + let zero = I32F32::saturating_from_num(0.0); + if stake.len() != score.len() { + log::error!( + "weighted_median stake and score have different lengths: {:?} != {:?}", + stake.len(), + score.len() + ); + return zero; } - assert!(stake.len() == score.len()); - let mid_idx: usize = n.safe_div(2); - let pivot: I32F32 = score[partition_idx[mid_idx]]; - let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); - let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + let mut current_partition_index: Vec = partition_idx.to_vec(); + let mut iteration_counter: usize = 0; + let iteration_limit = partition_idx.len(); let mut lower: Vec = vec![]; let mut upper: Vec = vec![]; - for &idx in partition_idx { - if score[idx] == pivot { - continue; + + loop { + let n = current_partition_index.len(); + if n == 0 { + return zero; } - if score[idx] < pivot { - lo_stake = lo_stake.saturating_add(stake[idx]); - lower.push(idx); - } else { - hi_stake = hi_stake.saturating_add(stake[idx]); - upper.push(idx); + if n == 1 { + if let Some(&only_idx) 
= current_partition_index.first() { + return get_safe::(score, only_idx); + } else { + return zero; + } } - } - if (partition_lo.saturating_add(lo_stake) <= minority) - && (minority < partition_hi.saturating_sub(hi_stake)) - { - return pivot; - } else if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { - return weighted_median( - stake, - score, - &lower, - minority, - partition_lo, - partition_lo.saturating_add(lo_stake), - ); - } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { - return weighted_median( - stake, + let mid_idx: usize = n.safe_div(2); + let pivot: I32F32 = get_safe::( score, - &upper, - minority, - partition_hi.saturating_sub(hi_stake), - partition_hi, + current_partition_index.get(mid_idx).copied().unwrap_or(0), ); + let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); + let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + + for idx in current_partition_index.clone() { + if get_safe::(score, idx) == pivot { + continue; + } + if get_safe::(score, idx) < pivot { + lo_stake = lo_stake.saturating_add(get_safe::(stake, idx)); + lower.push(idx); + } else { + hi_stake = hi_stake.saturating_add(get_safe::(stake, idx)); + upper.push(idx); + } + } + if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { + current_partition_index = lower.clone(); + partition_hi = partition_lo.saturating_add(lo_stake); + } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { + current_partition_index = upper.clone(); + partition_lo = partition_hi.saturating_sub(hi_stake); + } else { + return pivot; + } + + lower.clear(); + upper.clear(); + + // Safety limit: We should never need more than iteration_limit iterations. + iteration_counter = iteration_counter.saturating_add(1); + if iteration_counter > iteration_limit { + break; + } } - pivot + zero } /// Column-wise weighted median, e.g. 
stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col( stake: &[I32F32], score: &[Vec], majority: I32F32, ) -> Vec { - let rows = stake.len(); - let columns = score[0].len(); - let zero: I32F32 = I32F32::saturating_from_num(0); - let mut median: Vec = vec![zero; columns]; - - #[allow(clippy::needless_range_loop)] - for c in 0..columns { - let mut use_stake: Vec = vec![]; - let mut use_score: Vec = vec![]; - for r in 0..rows { - assert_eq!(columns, score[r].len()); - if stake[r] > zero { - use_stake.push(stake[r]); - use_score.push(score[r][c]); + let zero = I32F32::saturating_from_num(0.0); + + // Determine number of columns from the first row (no indexing). + let columns = score.first().map(|r| r.len()).unwrap_or(0); + let mut median = vec![zero; columns]; + + // Iterate columns without indexing into `median`. + let mut c = 0usize; + for med_cell in median.iter_mut() { + let mut use_stake: Vec = Vec::new(); + let mut use_score: Vec = Vec::new(); + + // Iterate rows aligned with `stake` length; avoid indexing into `stake`/`score`. + let mut r = 0usize; + while r < stake.len() { + let st = get_safe::(stake, r); + if st > zero { + // Fetch row safely; if it's missing or has wrong width, push zeros to both. + if let Some(row) = score.get(r) { + if row.len() == columns { + let val = row.get(c).copied().unwrap_or(zero); + use_stake.push(st); + use_score.push(val); + } else { + use_stake.push(zero); + use_score.push(zero); + } + } else { + // Missing row: insert zeroes. 
+ use_stake.push(zero); + use_score.push(zero); + } } + r = r.saturating_add(1); } + if !use_stake.is_empty() { inplace_normalize(&mut use_stake); let stake_sum: I32F32 = use_stake.iter().sum(); let minority: I32F32 = stake_sum.saturating_sub(majority); - median[c] = weighted_median( + + let idxs: Vec = (0..use_stake.len()).collect(); + *med_cell = weighted_median( &use_stake, &use_score, - (0..use_stake.len()).collect::>().as_slice(), + idxs.as_slice(), minority, zero, stake_sum, ); } + + c = c.saturating_add(1); } median } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col_sparse( stake: &[I32F32], score: &[Vec<(u16, I32F32)>], columns: u16, majority: I32F32, ) -> Vec { - let rows = stake.len(); - let zero: I32F32 = I32F32::saturating_from_num(0); + let zero = I32F32::saturating_from_num(0.0); + + // Keep only positive-stake rows; normalize them. let mut use_stake: Vec = stake.iter().copied().filter(|&s| s > zero).collect(); inplace_normalize(&mut use_stake); + let stake_sum: I32F32 = use_stake.iter().sum(); - let stake_idx: Vec = (0..use_stake.len()).collect(); let minority: I32F32 = stake_sum.saturating_sub(majority); - let mut use_score: Vec> = vec![vec![zero; use_stake.len()]; columns as usize]; - let mut median: Vec = vec![zero; columns as usize]; + let stake_idx: Vec = (0..use_stake.len()).collect(); + + // use_score: columns x use_stake.len(), prefilled with zeros. + let mut use_score: Vec> = (0..columns as usize) + .map(|_| vec![zero; use_stake.len()]) + .collect(); + + // Fill use_score by walking stake and score together, counting positives with k. 
let mut k: usize = 0; - for r in 0..rows { - if stake[r] <= zero { - continue; - } - for (c, val) in score[r].iter() { - use_score[*c as usize][k] = *val; + let mut stake_it = stake.iter(); + let mut score_it = score.iter(); + + while let (Some(&s), Some(sparse_row)) = (stake_it.next(), score_it.next()) { + if s > zero { + for &(c, val) in sparse_row.iter() { + if let Some(col_vec) = use_score.get_mut(c as usize) { + if let Some(cell) = col_vec.get_mut(k) { + *cell = val; + } + } + } + k = k.saturating_add(1); } - k.saturating_inc(); } - for c in 0..columns as usize { - median[c] = weighted_median( + + // Compute weighted median per column without indexing. + let mut median: Vec = Vec::with_capacity(columns as usize); + for col_vec in use_score.iter() { + median.push(weighted_median( &use_stake, - &use_score[c], - &stake_idx, + col_vec, + stake_idx.as_slice(), minority, zero, stake_sum, - ); + )); } + median } @@ -1140,34 +1118,51 @@ pub fn weighted_median_col_sparse( // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> Vec> { - if ratio == I32F32::saturating_from_num(0) { + if ratio == I32F32::saturating_from_num(0.0) { return mat1.to_owned(); } - if ratio == I32F32::saturating_from_num(1) { + if ratio == I32F32::saturating_from_num(1.0) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); - if mat1.is_empty() { - return vec![vec![]; 1]; - } - if mat1.first().unwrap_or(&vec![]).is_empty() { - return vec![vec![]; 1]; + if mat1.is_empty() || mat1.first().map(|r| r.is_empty()).unwrap_or(true) { + return vec![vec![]]; } - let mut result: Vec> = - vec![ - vec![I32F32::saturating_from_num(0); mat1.first().unwrap_or(&vec![]).len()]; - mat1.len() - ]; - for (i, (row1, row2)) in mat1.iter().zip(mat2.iter()).enumerate() { - assert!(row1.len() == row2.len()); - for (j, (&v1, &v2)) in row1.iter().zip(row2.iter()).enumerate() { - if let Some(res) = 
result.get_mut(i).unwrap_or(&mut vec![]).get_mut(j) { - *res = v1.saturating_add(ratio.saturating_mul(v2.saturating_sub(v1))); - } + + let zero = I32F32::saturating_from_num(0.0); + let cols = mat1.first().map(|r| r.len()).unwrap_or(0); + + // Pre-size result to mat1's shape (row count = mat1.len(), col count = first row of mat1). + let mut result: Vec> = { + let mut out = Vec::with_capacity(mat1.len()); + for _ in mat1.iter() { + out.push(vec![zero; cols]); + } + out + }; + + // Walk rows of mat1, mat2, and result in lockstep; stop when any iterator ends. + let mut m2_it = mat2.iter(); + let mut out_it = result.iter_mut(); + + for row1 in mat1.iter() { + let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else { + break; + }; + + // Walk elements of row1, row2, and out_row in lockstep; stop at the shortest. + let mut r1_it = row1.iter(); + let mut r2_it = row2.iter(); + let mut out_cell_it = out_row.iter_mut(); + + while let (Some(v1), Some(v2), Some(out_cell)) = + (r1_it.next(), r2_it.next(), out_cell_it.next()) + { + *out_cell = (*v1).saturating_add(ratio.saturating_mul((*v2).saturating_sub(*v1))); } + // Any remaining cells in `out_row` (beyond min row length) stay as zero (pre-filled). } + result } @@ -1175,7 +1170,6 @@ pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate_sparse( mat1: &[Vec<(u16, I32F32)>], mat2: &[Vec<(u16, I32F32)>], @@ -1188,7 +1182,10 @@ pub fn interpolate_sparse( if ratio == I32F32::saturating_from_num(1) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); + if mat1.len() != mat2.len() { + // In case if sizes mismatch, return clipped weights + return mat2.to_owned(); + } let rows = mat1.len(); let zero: I32F32 = I32F32::saturating_from_num(0); let mut result: Vec> = vec![vec![]; rows]; @@ -1224,12 +1221,16 @@ pub fn interpolate_sparse( } // Element-wise product of two vectors. 
-#[allow(dead_code)] pub fn vec_mul(a: &[I32F32], b: &[I32F32]) -> Vec { - a.iter() - .zip(b.iter()) - .map(|(x, y)| x.checked_mul(*y).unwrap_or_default()) - .collect() + let mut out = Vec::with_capacity(core::cmp::min(a.len(), b.len())); + let mut ai = a.iter(); + let mut bi = b.iter(); + + while let (Some(x), Some(y)) = (ai.next(), bi.next()) { + out.push(x.checked_mul(*y).unwrap_or_default()); + } + + out } // Element-wise product of matrix and vector @@ -1240,11 +1241,15 @@ pub fn mat_vec_mul(matrix: &[Vec], vector: &[I32F32]) -> Vec if first_row.is_empty() { return vec![vec![]]; } - matrix.iter().map(|row| vec_mul(row, vector)).collect() + + let mut out = Vec::with_capacity(matrix.len()); + for row in matrix.iter() { + out.push(vec_mul(row, vector)); + } + out } // Element-wise product of matrix and vector -#[allow(dead_code)] pub fn mat_vec_mul_sparse( matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], @@ -1265,58 +1270,6 @@ pub fn mat_vec_mul_sparse( result } -// Element-wise product of two matrices. -#[allow(dead_code)] -pub fn hadamard(mat1: &[Vec], mat2: &[Vec]) -> Vec> { - assert!(mat1.len() == mat2.len()); - let Some(first_row) = mat1.first() else { - return vec![vec![]]; - }; - if first_row.is_empty() { - return vec![vec![]]; - } - mat1.iter() - .zip(mat2) - .map(|(row1, row2)| { - assert!(row1.len() == row2.len()); - row1.iter() - .zip(row2) - .map(|(elem1, elem2)| elem1.saturating_mul(*elem2)) - .collect() - }) - .collect() -} - -// Element-wise product of two sparse matrices. 
-#[allow(dead_code, clippy::indexing_slicing)] -pub fn hadamard_sparse( - mat1: &[Vec<(u16, I32F32)>], - mat2: &[Vec<(u16, I32F32)>], - columns: u16, -) -> Vec> { - assert!(mat1.len() == mat2.len()); - let rows = mat1.len(); - let zero: I32F32 = I32F32::saturating_from_num(0); - let mut result: Vec> = vec![vec![]; rows]; - for i in 0..rows { - let mut row1: Vec = vec![zero; columns as usize]; - for (j, value) in mat1[i].iter() { - row1[*j as usize] = row1[*j as usize].saturating_add(*value); - } - let mut row2: Vec = vec![zero; columns as usize]; - for (j, value) in mat2[i].iter() { - row2[*j as usize] = row2[*j as usize].saturating_add(*value); - } - for j in 0..columns as usize { - let prod: I32F32 = row1[j].saturating_mul(row2[j]); - if zero < prod { - result[i].push((j as u16, prod)) - } - } - } - result -} - /// Clamp the input value between high and low. /// Note: assumes high > low pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 { @@ -1334,7 +1287,6 @@ pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 { // Return matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`. // `alpha` is the EMA coefficient, how much to add of the new observation, typically small, // higher alpha discounts older observations faster. 
-#[allow(dead_code)] pub fn mat_ema(new: &[Vec], old: &[Vec], alpha: I32F32) -> Vec> { let Some(first_row) = new.first() else { return vec![vec![]]; @@ -1342,214 +1294,199 @@ pub fn mat_ema(new: &[Vec], old: &[Vec], alpha: I32F32) -> Vec], old: &[Vec<(u16, I32F32)>], alpha: I32F32, ) -> Vec> { - assert!(new.len() == old.len()); - let n = new.len(); // assume square matrix, rows=cols - let zero: I32F32 = I32F32::saturating_from_num(0.0); - let one_minus_alpha: I32F32 = I32F32::saturating_from_num(1.0).saturating_sub(alpha); - let mut result: Vec> = vec![vec![]; n]; - for i in 0..new.len() { - let mut row: Vec = vec![zero; n]; - for (j, value) in new[i].iter() { - row[*j as usize] = row[*j as usize].saturating_add(alpha.saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha); + + let n = new.len(); // assume square (rows = cols) + if n == 0 { + return Vec::new(); + } + + let mut result: Vec> = Vec::with_capacity(n); + let mut old_it = old.iter(); + + for new_row in new.iter() { + let mut acc_row = vec![zero; n]; + + // Add alpha * new + for &(j, v) in new_row.iter() { + if let Some(cell) = acc_row.get_mut(j as usize) { + *cell = cell.saturating_add(alpha.saturating_mul(v)); + } } - for (j, value) in old[i].iter() { - row[*j as usize] = - row[*j as usize].saturating_add(one_minus_alpha.saturating_mul(*value)); + + // Add (1 - alpha) * old + if let Some(orow) = old_it.next() { + for &(j, v) in orow.iter() { + if let Some(cell) = acc_row.get_mut(j as usize) { + *cell = cell.saturating_add(one_minus_alpha.saturating_mul(v)); + } + } } - for (j, value) in row.iter().enumerate() { - if *value > zero { - result[i].push((j as u16, *value)) + + // Densified row -> sparse (keep positives) + let mut out_row: Vec<(u16, I32F32)> = Vec::new(); + for (j, &val) in acc_row.iter().enumerate() { + if val > zero { + out_row.push((j as u16, val)); } } + + result.push(out_row); } + result } /// 
Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values. -#[allow(dead_code)] pub fn mat_ema_alpha_sparse( new: &[Vec<(u16, I32F32)>], old: &[Vec<(u16, I32F32)>], alpha: &[Vec], ) -> Vec> { - // Ensure dimensions match. - assert!(new.len() == old.len()); - assert!(new.len() == alpha.len()); + // If shapes don't match, just return `new` + if new.len() != old.len() || new.len() != alpha.len() { + return new.to_owned(); + } - // The output vector of rows. - let mut result: Vec> = Vec::with_capacity(new.len()); - let zero: I32F32 = I32F32::saturating_from_num(0.0); + let zero = I32F32::saturating_from_num(0.0); let one = I32F32::saturating_from_num(1.0); - // Iterate over each row of the matrices. - for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { - // Initialize a row of zeros for the result matrix. - let mut decayed_values: Vec = vec![zero; alpha_row.len()]; + let mut result: Vec> = Vec::with_capacity(new.len()); + let mut old_it = old.iter(); + let mut alf_it = alpha.iter(); - let mut result_row: Vec<(u16, I32F32)> = Vec::new(); + for new_row in new.iter() { + let Some(old_row) = old_it.next() else { break }; + let Some(alpha_row) = alf_it.next() else { + break; + }; - // Process the old matrix values. - for (j, old_val) in old_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = ( - alpha_row.get(*j as usize), - decayed_values.get_mut(*j as usize), + // Densified accumulator sized to alpha_row length (columns outside are ignored). + let mut decayed_values = vec![zero; alpha_row.len()]; + + // Apply (1 - alpha_j) * old_ij into accumulator. 
+ for &(j, old_val) in old_row.iter() { + if let (Some(&a), Some(cell)) = ( + alpha_row.get(j as usize), + decayed_values.get_mut(j as usize), ) { - // Calculate the complement of the alpha value - let one_minus_alpha = one.saturating_sub(*alpha_val); - // Bonds_decayed = Bonds * (1 - alpha) - *decayed_val = one_minus_alpha.saturating_mul(*old_val); + *cell = one.saturating_sub(a).saturating_mul(old_val); } } - // Process the new matrix values. - for (j, new_val) in new_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = - (alpha_row.get(*j as usize), decayed_values.get(*j as usize)) + // Add alpha_j * new_ij, clamp to [0, 1], and emit sparse entries > 0. + let mut out_row: Vec<(u16, I32F32)> = Vec::new(); + for &(j, new_val) in new_row.iter() { + if let (Some(&a), Some(&decayed)) = + (alpha_row.get(j as usize), decayed_values.get(j as usize)) { - // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap - // Validators allocate their purchase across miners based on weights - let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); - let result_val = decayed_val.saturating_add(purchase_increment).min(one); - - if result_val > zero { - result_row.push((*j, result_val)); + let inc = a.saturating_mul(new_val).max(zero); + let val = decayed.saturating_add(inc).min(one); + if val > zero { + out_row.push((j, val)); } } } - result.push(result_row); + + result.push(out_row); } - // Return the computed EMA sparse matrix. result } /// Calculates the exponential moving average (EMA) for a dense matrix using dynamic alpha values. -#[allow(dead_code)] pub fn mat_ema_alpha( new: &[Vec], // Weights old: &[Vec], // Bonds alpha: &[Vec], ) -> Vec> { - // Check if the new matrix is empty or its first row is empty. 
- if new.is_empty() || new.first().is_none_or(|row| row.is_empty()) { - return vec![vec![]; 1]; + // Empty or degenerate input + if new.is_empty() || new.first().map(|r| r.is_empty()).unwrap_or(true) { + return vec![vec![]]; } - // Ensure the dimensions of the new, old and alpha matrices match. - assert!(new.len() == old.len()); - assert!(new.len() == alpha.len()); - - // Initialize the result matrix with zeros, having the same dimensions as the new matrix. - let zero: I32F32 = I32F32::saturating_from_num(0.0); - let one = I32F32::saturating_from_num(1.0); - - let mut result: Vec> = Vec::with_capacity(new.len()); - - // Iterate over each row of the matrices. - for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { - assert!(new_row.len() == old_row.len()); - assert!(new_row.len() == alpha_row.len()); - let mut result_row: Vec = Vec::new(); - - // Iterate over each column of the current row. - for j in 0..new_row.len() { - // Compute the EMA for the current element using saturating operations. - if let (Some(new_val), Some(old_val), Some(alpha_val)) = - (new_row.get(j), old_row.get(j), alpha_row.get(j)) - { - // Calculate the complement of the alpha value - let one_minus_alpha = one.saturating_sub(*alpha_val); - - // Bonds_decayed = Bonds * (1 - alpha) - let decayed_val = one_minus_alpha.saturating_mul(*old_val); + // If outer dimensions don't match, return bonds unchanged + if new.len() != old.len() || new.len() != alpha.len() { + return old.to_owned(); + } - // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap - // Validators allocate their purchase across miners based on weights - let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); - let result_val = decayed_val.saturating_add(purchase_increment).min(one); - result_row.push(result_val); - } + // Ensure each corresponding row has matching length; otherwise return `new` unchanged. 
+ let mut old_it = old.iter(); + let mut alp_it = alpha.iter(); + for nrow in new.iter() { + let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else { + return new.to_owned(); + }; + if nrow.len() != orow.len() || nrow.len() != arow.len() { + return new.to_owned(); } - result.push(result_row); } - // Return the computed EMA matrix. - result -} -/// Return the quantile of a vector of I32F32 values. -pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 { - // Clone the input data to avoid modifying the original vector. - let mut sorted_data = data.to_owned(); - - // Sort the cloned data in ascending order, handling potential NaN values. - sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal)); - - // Get the length of the sorted data. - let len = sorted_data.len(); + let zero = I32F32::saturating_from_num(0.0); + let one = I32F32::saturating_from_num(1.0); - // If the data is empty, return 0 as the quantile value. - if len == 0 { - return I32F32::saturating_from_num(0); - } + // Compute EMA: result = (1 - α) * old + α * new, clamped to [0, 1]. + let mut out: Vec> = Vec::with_capacity(new.len()); + let mut old_it = old.iter(); + let mut alp_it = alpha.iter(); - // Calculate the position in the sorted array corresponding to the quantile. - let pos = quantile * (len.saturating_sub(1)) as f64; + for nrow in new.iter() { + let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else { + break; + }; - // Determine the lower index by flooring the position. - let low = pos.floor() as usize; + let mut r: Vec = Vec::with_capacity(nrow.len()); + let mut n_it = nrow.iter(); + let mut o_it = orow.iter(); + let mut a_it = arow.iter(); - // Determine the higher index by ceiling the position. 
- let high = pos.ceil() as usize; + while let (Some(&n), Some(&o), Some(&a)) = (n_it.next(), o_it.next(), a_it.next()) { + let one_minus_a = one.saturating_sub(a); + let decayed = one_minus_a.saturating_mul(o); + let inc = a.saturating_mul(n).max(zero); + r.push(decayed.saturating_add(inc).min(one)); + } - // If the low and high indices are the same, return the value at that index. - if low == high { - sorted_data - .get(low) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)) - } else { - // Otherwise, perform linear interpolation between the low and high values. - let low_value = sorted_data - .get(low) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)); - let high_value = sorted_data - .get(high) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)); - - // Calculate the weight for interpolation. - let weight = I32F32::saturating_from_num(pos - low as f64); - - // Return the interpolated value using saturating operations. - low_value.saturating_add((high_value.saturating_sub(low_value)).saturating_mul(weight)) + out.push(r); } + + out } /// Safe ln function, returns 0 if value is 0. 
diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 7ab4446c3e..f4e94099ed 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -87,7 +87,7 @@ impl Pallet { Self::epoch_dense_mechanism(netuid, MechId::MAIN, rao_emission) } - /// Persists per-subsubnet epoch output in state + /// Persists per-mechanism epoch output in state pub fn persist_mechanism_epoch_terms( netuid: NetUid, mecid: MechId, diff --git a/pallets/subtensor/src/subnets/mechanism.rs b/pallets/subtensor/src/subnets/mechanism.rs index f8fa76ad51..6598c308f2 100644 --- a/pallets/subtensor/src/subnets/mechanism.rs +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -39,6 +39,15 @@ impl Pallet { .into() } + pub fn get_netuid(netuid_index: NetUidStorageIndex) -> NetUid { + if let Some(netuid) = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT) { + NetUid::from(netuid) + } else { + // Because GLOBAL_MAX_SUBNET_COUNT is not zero, this never happens + NetUid::ROOT + } + } + pub fn get_netuid_and_subid( netuid_index: NetUidStorageIndex, ) -> Result<(NetUid, MechId), Error> { diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index a200fa8b25..6c6636ca68 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1341,39 +1341,6 @@ fn test_math_row_sum_sparse() { assert_vec_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_col_sum() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = col_sum(&matrix); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_col_sum_sparse() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = 
col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[21., 21., 21.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![1., 0., 3., 4., 0., 6., 7., 0., 9., 10., 0., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 0., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - #[test] fn test_math_matmul() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); @@ -1468,51 +1435,6 @@ fn test_math_col_clip_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_clip_sparse() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = clip_sparse( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = 
vec_to_mat_fixed(&target, 4, false); - let result = clip( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_inplace_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mut matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - inplace_clip( - &mut matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&matrix, &target, I32F32::from_num(0)); -} - #[test] fn test_math_weighted_median() { let mut rng = thread_rng(); @@ -2083,70 +2005,6 @@ fn test_math_interpolate_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_hadamard() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; 
- let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); -} - -#[test] -fn test_math_hadamard_sparse() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - 
assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); -} - #[test] fn test_math_mat_ema_alpha() { let old: Vec = vec![ @@ -2726,9 +2584,7 @@ fn test_mat_ema_alpha_single_element() { assert_eq!(result, expected); } -// TODO: (@sd): Should these be non panicking? #[test] -#[should_panic(expected = "assertion failed")] fn test_mat_ema_alpha_mismatched_dimensions() { let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]); let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); @@ -2740,41 +2596,6 @@ fn test_mat_ema_alpha_mismatched_dimensions() { ]; 2 ]; - let _result = mat_ema_alpha(&new, &old, &alpha); -} - -#[test] -fn test_quantile() { - // Test with a non-empty vector and valid quantile values - let data = vec![ - I32F32::from_num(1.0), - I32F32::from_num(2.0), - I32F32::from_num(3.0), - I32F32::from_num(4.0), - I32F32::from_num(5.0), - ]; - - // Test 0th quantile (minimum) - let result = quantile(&data, 0.0); - assert_eq!(result, I32F32::from_num(1.0)); - - // Test 25th quantile - let result = quantile(&data, 0.25); - assert_eq!(result, I32F32::from_num(2.0)); - - // Test 50th quantile (median) - let result = quantile(&data, 0.5); - assert_eq!(result, I32F32::from_num(3.0)); - - // Test 66th quantile - let result = quantile(&data, 0.66); - assert_eq!(result, I32F32::from_num(3.64)); - - // Test 75th quantile - let result = quantile(&data, 0.75); - assert_eq!(result, I32F32::from_num(4.0)); - - // Test 100th quantile (maximum) - let result = quantile(&data, 1.0); - assert_eq!(result, I32F32::from_num(5.0)); + let result = mat_ema_alpha(&new, &old, &alpha); + assert_eq!(result[0][0], old[0][0]) } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 9ca7e361cc..88444a1b83 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -206,8 +206,16 @@ impl Pallet { pub fn get_dividends(netuid: NetUid) -> Vec { Dividends::::get(netuid) } - pub fn 
get_last_update(netuid: NetUidStorageIndex) -> Vec { - LastUpdate::::get(netuid) + /// Fetch LastUpdate for `netuid` and ensure its length is at least `get_subnetwork_n(netuid)`, + /// padding with zeros if needed. Returns the (possibly padded) vector. + pub fn get_last_update(netuid_index: NetUidStorageIndex) -> Vec { + let netuid = Self::get_netuid(netuid_index); + let target_len = Self::get_subnetwork_n(netuid) as usize; + let mut v = LastUpdate::::get(netuid_index); + if v.len() < target_len { + v.resize(target_len, 0); + } + v } pub fn get_pruning_score(netuid: NetUid) -> Vec { PruningScores::::get(netuid) From 29414e54a19aaeba8e26c0507894f8d1e0bb09a2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:02:57 -0400 Subject: [PATCH 2/9] Cleanup code comments --- pallets/subtensor/src/epoch/math.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 6288ac14ae..ccc23a1bed 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -231,10 +231,10 @@ pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort (no indexing) + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { if let Some(cell) = result.get_mut(idx) { - *cell = false; // no indexing + *cell = false; } } result @@ -1003,17 +1003,17 @@ pub fn weighted_median_col( ) -> Vec { let zero = I32F32::saturating_from_num(0.0); - // Determine number of columns from the first row (no indexing). + // Determine number of columns from the first row. let columns = score.first().map(|r| r.len()).unwrap_or(0); let mut median = vec![zero; columns]; - // Iterate columns without indexing into `median`. + // Iterate columns into `median`. 
let mut c = 0usize; for med_cell in median.iter_mut() { let mut use_stake: Vec = Vec::new(); let mut use_score: Vec = Vec::new(); - // Iterate rows aligned with `stake` length; avoid indexing into `stake`/`score`. + // Iterate rows aligned with `stake` length. let mut r = 0usize; while r < stake.len() { let st = get_safe::(stake, r); @@ -1098,7 +1098,7 @@ pub fn weighted_median_col_sparse( } } - // Compute weighted median per column without indexing. + // Compute weighted median per column. let mut median: Vec = Vec::with_capacity(columns as usize); for col_vec in use_score.iter() { median.push(weighted_median( From 93cb2da050977ddf8abc6e308fbcaa786d336809 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:11:17 -0400 Subject: [PATCH 3/9] Add a test for mismatching sizes of LastUpdate vector and Weights matrix (issue that killed TestNet) --- pallets/subtensor/src/tests/epoch.rs | 56 ++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index fec978a51d..7c23dc2b2c 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -3827,3 +3827,59 @@ fn test_epoch_does_not_mask_outside_window_but_masks_inside() { ); }); } + +// Test an epoch doesn't panic when LastUpdate size doesn't match to Weights size. 
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_last_update_size_mismatch --exact --show-output --nocapture +#[test] +fn test_last_update_size_mismatch() { + new_test_ext(1).execute_with(|| { + log::info!("test_1_graph:"); + let netuid = NetUid::from(1); + let coldkey = U256::from(0); + let hotkey = U256::from(0); + let uid: u16 = 0; + let stake_amount: u64 = 1_000_000_000; + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); + SubtensorModule::set_max_allowed_uids(netuid, 1); + SubtensorModule::add_balance_to_coldkey_account( + &coldkey, + stake_amount + ExistentialDeposit::get(), + ); + register_ok_neuron(netuid, hotkey, coldkey, 1); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + stake_amount.into() + )); + + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); + run_to_block(1); // run to next block to ensure weights are set on nodes after their registration block + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + vec![uid], + vec![u16::MAX], + 0 + )); + + // Set mismatching LastUpdate vector + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![1, 1, 1]); + + SubtensorModule::epoch(netuid, 1_000_000_000.into()); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + stake_amount.into() + ); + assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); + assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); + }); +} From e6847636e0c332637cc8cb6fcee7ea698c014489 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:38:02 -0400 Subject: [PATCH 4/9] Remove remaining 
asserts in the codebase --- pallets/subtensor/src/epoch/run_epoch.rs | 17 ++++++++++++----- pallets/subtensor/src/utils/misc.rs | 13 +++++++------ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index f4e94099ed..7820a67f40 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1387,14 +1387,17 @@ impl Pallet { bonds: &[Vec], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::new(); + + if weights.len() != bonds.len() { + log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + return alphas; + } // Get the high and low alpha values for the network. let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::new(); - for (w_row, b_row) in weights.iter().zip(bonds.iter()) { let mut row_alphas = Vec::new(); @@ -1433,12 +1436,16 @@ impl Pallet { bonds: &[Vec<(u16, I32F32)>], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::with_capacity(consensus.len()); + + if weights.len() != bonds.len() { + log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + return alphas; + } let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::with_capacity(consensus.len()); let zero = I32F32::from_num(0.0); // iterate over rows diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 
88444a1b83..2a8e55cb63 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -253,12 +253,13 @@ impl Pallet { SubnetworkN::::get(netuid) ); log::debug!("uid = {uid:?}"); - assert!(uid < SubnetworkN::::get(netuid)); - PruningScores::::mutate(netuid, |v| { - if let Some(s) = v.get_mut(uid as usize) { - *s = pruning_score; - } - }); + if uid < SubnetworkN::::get(netuid)) { + PruningScores::::mutate(netuid, |v| { + if let Some(s) = v.get_mut(uid as usize) { + *s = pruning_score; + } + }); + } } pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) { let mut updated_validator_permits = Self::get_validator_permit(netuid); From 0f75c1b4627d693fac968e8fa728d627bd415870 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:21:35 -0400 Subject: [PATCH 5/9] Fix LastUpdate update in add_neuron --- pallets/subtensor/src/epoch/run_epoch.rs | 12 +++++- pallets/subtensor/src/subnets/uids.rs | 2 +- pallets/subtensor/src/tests/registration.rs | 41 ++++++++++++++++++++- pallets/subtensor/src/utils/misc.rs | 2 +- 4 files changed, 52 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 7820a67f40..660690ae9f 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1390,7 +1390,11 @@ impl Pallet { let mut alphas = Vec::new(); if weights.len() != bonds.len() { - log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + log::error!( + "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); return alphas; } @@ -1439,7 +1443,11 @@ impl Pallet { let mut alphas = Vec::with_capacity(consensus.len()); if weights.len() != bonds.len() { - log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", 
weights.len(), bonds.len()); + log::error!( + "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); return alphas; } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index cf639f9fbf..2fcf981780 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -117,7 +117,7 @@ impl Pallet { for mecid in 0..MechanismCountCurrent::::get(netuid).into() { let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Incentive::::mutate(netuid_index, |v| v.push(0)); - LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); + Self::set_last_update_for_uid(netuid_index, next_uid, block_number); } Dividends::::mutate(netuid, |v| v.push(0)); PruningScores::::mutate(netuid, |v| v.push(0)); diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 23013d9b70..48e887d606 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -9,7 +9,7 @@ use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; use sp_core::U256; use sp_runtime::traits::{DispatchInfoOf, TransactionExtension, TxBaseImplication}; -use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex}; use super::mock; use super::mock::*; @@ -2149,6 +2149,45 @@ fn test_registration_disabled() { }); } +#[test] +fn test_last_update_correctness() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har + //add network + SubtensorModule::set_burn(netuid, burn_cost.into()); + add_network(netuid, tempo, 0); + + let reserve = 
1_000_000_000_000; + mock::setup_reserves(netuid, reserve.into(), reserve.into()); + + // Simulate existing neurons + let existing_neurons = 3; + SubnetworkN::::insert(netuid, existing_neurons); + + // Simulate no LastUpdate so far (can happen on mechanisms) + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); + + // Give some $$$ to coldkey + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + // Check that LastUpdate has existing_neurons + 1 elements now + assert_eq!( + LastUpdate::::get(NetUidStorageIndex::from(netuid)).len(), + (existing_neurons + 1) as usize + ); + }); +} + // #[ignore] // #[test] // fn test_hotkey_swap_ok() { diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 2a8e55cb63..8febdfe208 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -253,7 +253,7 @@ impl Pallet { SubnetworkN::::get(netuid) ); log::debug!("uid = {uid:?}"); - if uid < SubnetworkN::::get(netuid)) { + if uid < SubnetworkN::::get(netuid) { PruningScores::::mutate(netuid, |v| { if let Some(s) = v.get_mut(uid as usize) { *s = pruning_score; From fe103e6a2c053e831b3bf7646b9fb15f472487ae Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:48:52 -0400 Subject: [PATCH 6/9] Revert bad rename for SubnetNotExists error --- pallets/admin-utils/src/tests/mod.rs | 2 +- pallets/subtensor/src/coinbase/root.rs | 2 +- pallets/subtensor/src/lib.rs | 4 ++-- pallets/subtensor/src/staking/recycle_alpha.rs | 10 ++-------- pallets/subtensor/src/staking/remove_stake.rs | 5 +---- pallets/subtensor/src/staking/set_children.rs | 5 +---- pallets/subtensor/src/subnets/registration.rs | 10 ++-------- pallets/subtensor/src/subnets/subnet.rs | 7 ++----- pallets/subtensor/src/subnets/uids.rs | 5 +---- 
pallets/subtensor/src/tests/children.rs | 10 +++++----- pallets/subtensor/src/tests/networks.rs | 2 +- pallets/subtensor/src/tests/recycle_alpha.rs | 4 ++-- pallets/subtensor/src/tests/subnet.rs | 2 +- pallets/subtensor/src/transaction_extension.rs | 2 +- 14 files changed, 23 insertions(+), 47 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 0e0232859b..b6cafb71b7 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2638,7 +2638,7 @@ fn test_trim_to_max_allowed_uids() { NetUid::from(42), new_max_n ), - pallet_subtensor::Error::::MechanismDoesNotExist + pallet_subtensor::Error::::SubnetNotExists ); // New max n less than lower bound diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index ac99d03838..4cb9f177e1 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -369,7 +369,7 @@ impl Pallet { // 1. --- The network exists? ensure!( Self::if_subnet_exist(netuid) && netuid != NetUid::ROOT, - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); // 2. --- Perform the cleanup before removing the network. 
diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index e452e858bc..f53ee4f58a 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1985,7 +1985,7 @@ pub enum CustomTransactionError { ColdkeyInSwapSchedule, StakeAmountTooLow, BalanceTooLow, - SubnetDoesntExist, + SubnetNotExists, HotkeyAccountDoesntExist, NotEnoughStakeToWithdraw, RateLimitExceeded, @@ -2010,7 +2010,7 @@ impl From for u8 { CustomTransactionError::ColdkeyInSwapSchedule => 0, CustomTransactionError::StakeAmountTooLow => 1, CustomTransactionError::BalanceTooLow => 2, - CustomTransactionError::SubnetDoesntExist => 3, + CustomTransactionError::SubnetNotExists => 3, CustomTransactionError::HotkeyAccountDoesntExist => 4, CustomTransactionError::NotEnoughStakeToWithdraw => 5, CustomTransactionError::RateLimitExceeded => 6, diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 371c5895e8..7334c8126a 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -23,10 +23,7 @@ impl Pallet { ) -> DispatchResult { let coldkey: T::AccountId = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), @@ -91,10 +88,7 @@ impl Pallet { ) -> DispatchResult { let coldkey = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index bb136c1196..9d610ea88f 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -443,10 +443,7 @@ impl Pallet { pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> 
DispatchResult { // 1) Ensure the subnet exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // 2) Owner / lock cost. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index b4629ec54d..cf7103b7ab 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -62,10 +62,7 @@ impl Pallet { ); // Check that the network we are trying to create the child on exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // Check that the coldkey owns the hotkey. ensure!( diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index 90ba2ea1aa..bd7bdeed57 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -78,10 +78,7 @@ impl Pallet { !netuid.is_root(), Error::::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. ensure!( @@ -236,10 +233,7 @@ impl Pallet { !netuid.is_root(), Error::::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. 
ensure!( diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index f44859bbe9..8439297e14 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -343,7 +343,7 @@ impl Pallet { /// /// # Raises /// - /// * `Error::::MechanismDoesNotExist`: If the subnet does not exist. + /// * `Error::::SubnetNotExists`: If the subnet does not exist. /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. /// @@ -351,10 +351,7 @@ impl Pallet { /// /// * `DispatchResult`: A result indicating the success or failure of the operation. pub fn do_start_call(origin: T::RuntimeOrigin, netuid: NetUid) -> DispatchResult { - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); Self::ensure_subnet_owner(origin, netuid)?; ensure!( FirstEmissionBlockNumber::::get(netuid).is_none(), diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 2fcf981780..b68fabfbd5 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -133,10 +133,7 @@ impl Pallet { pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { // Reasonable limits - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( max_n >= MinAllowedUids::::get(netuid), Error::::InvalidValue diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index cf11cf6190..0fee0af2ca 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -62,7 +62,7 @@ fn test_do_set_child_singular_network_does_not_exist() { netuid, vec![(proportion, child)] ), - 
Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -328,7 +328,7 @@ fn test_add_singular_child() { netuid, vec![(u64::MAX, child)] ), - Err(Error::::MechanismDoesNotExist.into()) + Err(Error::::SubnetNotExists.into()) ); add_network(netuid, 1, 0); step_rate_limit(&TransactionType::SetChildren, netuid); @@ -472,7 +472,7 @@ fn test_do_set_empty_children_network_does_not_exist() { netuid, vec![] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -601,7 +601,7 @@ fn test_do_schedule_children_multiple_network_does_not_exist() { netuid, vec![(proportion, child1)] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -1200,7 +1200,7 @@ fn test_do_revoke_children_multiple_network_does_not_exist() { netuid, vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 779879a129..42de84f54f 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -270,7 +270,7 @@ fn dissolve_nonexistent_subnet_fails() { new_test_ext(0).execute_with(|| { assert_err!( SubtensorModule::do_dissolve_network(9_999.into()), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index d230af6f30..173a03aea1 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -430,7 +430,7 @@ fn test_recycle_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); assert_noop!( @@ -502,7 +502,7 @@ fn test_burn_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); assert_noop!( diff --git a/pallets/subtensor/src/tests/subnet.rs 
b/pallets/subtensor/src/tests/subnet.rs index a1331a1707..a11eae759e 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -52,7 +52,7 @@ fn test_do_start_call_fail_with_not_existed_subnet() { <::RuntimeOrigin>::signed(coldkey_account_id), netuid ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index b56dff0ea0..42e45b5fd4 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -53,7 +53,7 @@ where if let Err(err) = result { Err(match err { Error::::AmountTooLow => CustomTransactionError::StakeAmountTooLow.into(), - Error::::SubnetNotExists => CustomTransactionError::SubnetDoesntExist.into(), + Error::::SubnetNotExists => CustomTransactionError::SubnetNotExists.into(), Error::::NotEnoughBalanceToStake => CustomTransactionError::BalanceTooLow.into(), Error::::HotKeyAccountNotExists => { CustomTransactionError::HotkeyAccountDoesntExist.into() From 8cf762e54609521c4b901e276975268f02686389 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:56:39 -0400 Subject: [PATCH 7/9] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index b6857cf7ae..254bec73a3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 317, + spec_version: 318, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 8b3d120d7fa8800c72b124fc9639b41da3e051b2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 19 Sep 2025 14:07:05 +0000 Subject: [PATCH 8/9] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index d6a199b0f1..20f377a925 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -233,7 +233,7 @@ mod dispatches { /// #[pallet::call_index(96)] #[pallet::weight((Weight::from_parts(67_770_000, 0) - .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -302,7 +302,7 @@ mod dispatches { /// #[pallet::call_index(100)] #[pallet::weight((Weight::from_parts(100_500_000, 0) - .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -1314,8 +1314,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(57_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1601,8 +1601,8 @@ mod dispatches { /// User register a new 
subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(36_u64)) - .saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -2265,7 +2265,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From 7c62b8f4e9617dc746cbe1712db45254ce47af52 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 19 Sep 2025 12:13:41 -0400 Subject: [PATCH 9/9] Add error logging where runtime asserts were removed --- pallets/subtensor/src/epoch/math.rs | 112 +++++++++++++++++++++++++++- pallets/subtensor/src/utils/misc.rs | 6 ++ 2 files changed, 116 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index ccc23a1bed..4b613fa961 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -292,6 +292,14 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { /// Returns x / y for input vectors x and y, if y == 0 return 0. pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { + if x.len() != y.len() { + log::error!( + "vecdiv input lengths are not equal: {:?} != {:?}", + x.len(), + y.len() + ); + } + let zero = I32F32::saturating_from_num(0); let mut out = Vec::with_capacity(x.len()); @@ -477,6 +485,14 @@ pub fn inplace_col_max_upscale(x: &mut [Vec]) { // Apply mask to vector, mask=true will mask out, i.e. set to 0. 
pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { + if mask.len() != vector.len() { + log::error!( + "inplace_mask_vector input lengths are not equal: {:?} != {:?}", + mask.len(), + vector.len() + ); + } + if mask.is_empty() { return; } @@ -490,6 +506,13 @@ pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { // Apply mask to matrix, mask=true will mask out, i.e. set to 0. pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "inplace_mask_matrix input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = mask.first() else { return; }; @@ -513,6 +536,13 @@ pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "inplace_mask_rows input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = matrix.first() else { return; }; @@ -528,6 +558,13 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { // Apply column mask to matrix, mask=true will mask out, i.e. set to 0. // Assumes each column has the same length. 
pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) {
+    if mask.len() != matrix.len() {
+        log::error!(
+            "inplace_mask_cols input sizes are not equal: {:?} != {:?}",
+            mask.len(),
+            matrix.len()
+        );
+    }
     if matrix.is_empty() {
         return;
     };
@@ -603,10 +640,13 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index:
         return;
     }
     if matrix.len() != first_row.len() {
-        log::error!("inplace_mask_diag_except_index: input matrix is not square");
+        log::error!(
+            "inplace_mask_diag input matrix is not square: {:?} != {:?}",
+            matrix.len(),
+            first_row.len()
+        );
         return;
     }
-
     let diag_at_index = matrix
         .get(except_index as usize)
         .and_then(|row| row.get(except_index as usize))
@@ -752,6 +792,13 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec {
     if cols == 0 {
         return vec![];
     }
+    if matrix.len() != vector.len() {
+        log::error!(
+            "matmul input sizes are not equal: {:?} != {:?}",
+            matrix.len(),
+            vector.len()
+        );
+    }
     let zero = I32F32::saturating_from_num(0.0);
     let mut acc = vec![zero; cols];
@@ -783,6 +830,13 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], mat2: &[Vec], ratio: I32F32) ->
     if mat1.is_empty() || mat1.first().map(|r| r.is_empty()).unwrap_or(true) {
         return vec![vec![]];
     }
+    if mat1.len() != mat2.len() {
+        log::error!(
+            "interpolate mat1.len() != mat2.len(): {:?} != {:?}",
+            mat1.len(),
+            mat2.len()
+        );
+    }
     let zero = I32F32::saturating_from_num(0.0);
     let cols = mat1.first().map(|r| r.len()).unwrap_or(0);
@@ -1147,8 +1213,16 @@ pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) ->
     for row1 in mat1.iter() {
         let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else {
+            log::error!("interpolate: No more rows in mat2");
             break;
         };
+        if row1.len() != row2.len() {
+            log::error!(
+                "interpolate row1.len() != row2.len(): {:?} != {:?}",
+                row1.len(),
+                row2.len()
+            );
+        }
         // Walk elements of row1, row2, and out_row in lockstep; stop at the shortest.
let mut r1_it = row1.iter();
@@ -1184,6 +1258,11 @@ pub fn interpolate_sparse(
     }
     if mat1.len() != mat2.len() {
         // In case if sizes mismatch, return clipped weights
+        log::error!(
+            "interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}",
+            mat1.len(),
+            mat2.len()
+        );
         return mat2.to_owned();
     }
     let rows = mat1.len();
@@ -1329,6 +1408,14 @@ pub fn mat_ema_sparse(
     old: &[Vec<(u16, I32F32)>],
     alpha: I32F32,
 ) -> Vec> {
+    if new.len() != old.len() {
+        log::error!(
+            "mat_ema_sparse: new.len() != old.len(): {:?} != {:?}",
+            new.len(),
+            old.len()
+        );
+    }
+
     let zero = I32F32::saturating_from_num(0.0);
     let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha);
@@ -1381,6 +1468,12 @@ pub fn mat_ema_alpha_sparse(
 ) -> Vec> {
     // If shapes don't match, just return `new`
     if new.len() != old.len() || new.len() != alpha.len() {
+        log::error!(
+            "mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}",
+            old.len(),
+            new.len(),
+            alpha.len()
+        );
         return new.to_owned();
     }
@@ -1397,6 +1490,15 @@ pub fn mat_ema_alpha_sparse(
         break;
     };
+    if new_row.len() != old_row.len() || new_row.len() != alpha_row.len() {
+        log::error!(
+            "mat_ema_alpha_sparse row shapes don't match: {:?} vs. {:?} vs. {:?}",
+            old_row.len(),
+            new_row.len(),
+            alpha_row.len()
+        );
+    }
+
         // Densified accumulator sized to alpha_row length (columns outside are ignored).
         let mut decayed_values = vec![zero; alpha_row.len()];
@@ -1443,6 +1545,12 @@ pub fn mat_ema_alpha(
     // If outer dimensions don't match, return bonds unchanged
     if new.len() != old.len() || new.len() != alpha.len() {
+        log::error!(
+            "mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. 
{:?}", + old.len(), + new.len(), + alpha.len() + ); return old.to_owned(); } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 8febdfe208..a4d4755e5d 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -259,6 +259,12 @@ impl Pallet { *s = pruning_score; } }); + } else { + log::error!( + "set_pruning_score_for_uid: uid >= SubnetworkN::::get(netuid): {:?} >= {:?}", + uid, + SubnetworkN::::get(netuid) + ); } } pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) {