diff --git a/bittensor/_neuron/text/core_server/__init__.py b/bittensor/_neuron/text/core_server/__init__.py index e6d040951d..93b57c9e30 100644 --- a/bittensor/_neuron/text/core_server/__init__.py +++ b/bittensor/_neuron/text/core_server/__init__.py @@ -142,7 +142,7 @@ def __init__( self.prometheus_info = Info('neuron_info', "Info sumamries for the running server-miner.", registry=registry) self.config.to_prometheus() - if self.config.netuid == None and self.config.subtensor.network == 'finney': + if self.config.netuid == None and self.config.subtensor.network != 'nakamoto': subtensor = bittensor.subtensor(config = config) if subtensor == None else subtensor self.config.netuid = subtensor.get_subnets()[0] @@ -182,7 +182,7 @@ def __init__( ) # Verify subnet exists - if self.config.subtensor.network == 'finney' and not self.subtensor.subnet_exists( netuid = self.config.netuid ): + if self.config.subtensor.network != 'nakamoto' and not self.subtensor.subnet_exists( netuid = self.config.netuid ): bittensor.__console__.print(f"[red]Subnet {self.config.netuid} does not exist[/red]") sys.exit(1) @@ -652,25 +652,25 @@ def hotkey_check(): return True def get_neuron(self): - if self.subtensor.network == 'finney': - nn = self.subtensor.get_neuron_for_pubkey_and_subnet(self.wallet.hotkey.ss58_address, netuid = self.config.netuid) - elif self.subtensor.network == 'nakamoto': + if self.subtensor.network == 'nakamoto': nn = self.subtensor.neuron_for_pubkey(self.wallet.hotkey.ss58_address) + else: + nn = self.subtensor.get_neuron_for_pubkey_and_subnet(self.wallet.hotkey.ss58_address, netuid = self.config.netuid) return nn def get_neuron_num(self): - if self.subtensor.network == 'finney': - n = self.subtensor.subnetwork_n( netuid = self.config.netuid) - elif self.subtensor.network == 'nakamoto': + if self.subtensor.network == 'nakamoto': n = self.subtensor.n() + else: + n = self.subtensor.subnetwork_n( netuid = self.config.netuid) return n def get_blocks_per_set_weights(self): 
blocks_per_set_weights = self.config.neuron.blocks_per_set_weights if blocks_per_set_weights == -1: - if self.subtensor.network == 'finney': - blocks_per_set_weights = self.subtensor.validator_epoch_length(self.config.netuid) - elif self.subtensor.network == 'nakamoto': + if self.subtensor.network == 'nakamoto': blocks_per_set_weights = self.subtensor.validator_epoch_length + else: + blocks_per_set_weights = self.subtensor.validator_epoch_length(self.config.netuid) return blocks_per_set_weights \ No newline at end of file diff --git a/bittensor/_neuron/text/core_validator/__init__.py b/bittensor/_neuron/text/core_validator/__init__.py index f585dcb12d..ba3daf08a4 100644 --- a/bittensor/_neuron/text/core_validator/__init__.py +++ b/bittensor/_neuron/text/core_validator/__init__.py @@ -102,11 +102,11 @@ def __init__( config.netuid = netuid if netuid != None else config.netuid subtensor = bittensor.subtensor ( config = config ) if subtensor == None else subtensor - if config.subtensor.network == 'finney' and config.netuid == None: + if config.subtensor.network != 'nakamoto' and config.netuid == None: config.netuid = subtensor.get_subnets()[0] # Verify subnet exists - if config.subtensor.network == 'finney' and not subtensor.subnet_exists( netuid = config.netuid ): + if config.subtensor.network != 'nakamoto' and not subtensor.subnet_exists( netuid = config.netuid ): bittensor.__console__.print(f"[red]Subnet {config.netuid} does not exist[/red]") sys.exit(1) @@ -143,14 +143,15 @@ def __init__( self.axon = bittensor.axon ( netuid=self.config.netuid, config = self.config, wallet = self.wallet ) if axon == None else axon self.device = torch.device ( device = self.config.neuron.device ) self.nucleus = nucleus ( config = self.config, device = self.device, subtensor = self.subtensor, vlogger = self.vlogger ).to( self.device ) - if self.config.subtensor.network == 'finney': - self.dataset = (bittensor.dataset(config=self.config, 
batch_size=self.subtensor.validator_batch_size(self.config.netuid), - block_size=self.subtensor.validator_sequence_length(self.config.netuid) + self.config.neuron.validation_len + self.subtensor.validator_prune_len(netuid=self.config.netuid)) - if dataset is None else dataset) - else: + if self.config.subtensor.network == 'nakamoto': self.dataset = (bittensor.dataset(config=self.config, batch_size=self.subtensor.validator_batch_size, block_size=self.subtensor.validator_sequence_length + self.config.neuron.validation_len + self.subtensor.validator_prune_len) if dataset is None else dataset) + else: + # Default to finney + self.dataset = (bittensor.dataset(config=self.config, batch_size=self.subtensor.validator_batch_size(self.config.netuid), + block_size=self.subtensor.validator_sequence_length(self.config.netuid) + self.config.neuron.validation_len + self.subtensor.validator_prune_len(netuid=self.config.netuid)) + if dataset is None else dataset) self.optimizer = torch.optim.SGD( self.nucleus.parameters(), lr=self.config.neuron.learning_rate, momentum=self.config.neuron.momentum @@ -380,17 +381,7 @@ def run_epoch( self ): """ # === Get params for epoch === # Pulling the latest chain parameters. 
- if self.config.subtensor.network == 'finney': - batch_size = self.subtensor.validator_batch_size(netuid=self.config.netuid) - sequence_length = self.subtensor.validator_sequence_length(netuid=self.config.netuid) - # Number of tokens to prune for phrase validation beyond sequence context - prune_len = self.config.neuron.prune_len = self.subtensor.validator_prune_len(netuid=self.config.netuid) - self.config.nucleus.logits_divergence = self.subtensor.validator_logits_divergence(netuid=self.config.netuid) - min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) - max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) - self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power(netuid=self.config.netuid) - self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power(netuid=self.config.netuid) - else: + if self.config.subtensor.network == 'nakamoto': batch_size = self.subtensor.validator_batch_size sequence_length = self.subtensor.validator_sequence_length # Number of tokens to prune for phrase validation beyond sequence context @@ -400,6 +391,17 @@ def run_epoch( self ): max_weight_limit = self.subtensor.max_weight_limit self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power + else: + # Default to finney + batch_size = self.subtensor.validator_batch_size(netuid=self.config.netuid) + sequence_length = self.subtensor.validator_sequence_length(netuid=self.config.netuid) + # Number of tokens to prune for phrase validation beyond sequence context + prune_len = self.config.neuron.prune_len = self.subtensor.validator_prune_len(netuid=self.config.netuid) + self.config.nucleus.logits_divergence = self.subtensor.validator_logits_divergence(netuid=self.config.netuid) + min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) + max_weight_limit = 
self.subtensor.max_weight_limit(netuid=self.config.netuid) + self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power(netuid=self.config.netuid) + self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power(netuid=self.config.netuid) current_block = self.subtensor.block validation_len = self.config.neuron.validation_len # Number of tokens to holdout for phrase validation beyond sequence context @@ -535,8 +537,8 @@ def run_epoch( self ): # console table - weight table (every validation step) sample_uids, sample_weights = self.calculate_weights() self.vlogger.print_weights_table( - min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights, - max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.max_weight_limit, + min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.min_allowed_weights, + max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.max_weight_limit, neuron_stats = self.neuron_stats, title = str(self), metagraph_n = self.metagraph.n.item(), @@ -581,8 +583,8 @@ def run_epoch( self ): if self.config.logging.debug or self.config.logging.trace: # console table - weight table (every end of epoch) self.vlogger.print_weights_table( - min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights, - max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights, + min_allowed_weights = 
self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.min_allowed_weights, + max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.max_weight_limit, neuron_stats = self.neuron_stats, title = str(self), metagraph_n = self.metagraph.n.item(), @@ -728,8 +730,8 @@ def calculate_weights(self): weight_key = self.weight_key + '!' # use zeroing key to penalize non-responsive neurons - min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights - max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.max_weight_limit + min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.min_allowed_weights + max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network != 'nakamoto' else self.subtensor.max_weight_limit # === Populate neuron weights === @@ -749,10 +751,11 @@ def calculate_weights(self): # === Exclude lowest quantile from weight setting === max_exclude = (len(sample_weights) - min_allowed_weights) / len(sample_weights) # max excludable weight quantile - if self.config.subtensor.network == 'finney': - quantile = self.subtensor.validator_exclude_quantile(netuid=self.config.netuid) if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile - else: + if self.config.subtensor.network == 'nakamoto': quantile = self.subtensor.validator_exclude_quantile if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile + else: + # Default to finney + quantile = self.subtensor.validator_exclude_quantile(netuid=self.config.netuid) if 
self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile if 0 < max_exclude: exclude_quantile = min([quantile , max_exclude]) # reduce quantile to meet min_allowed_weights lowest_quantile = sample_weights.quantile(exclude_quantile) # find lowest quantile threshold @@ -771,12 +774,12 @@ def calculate_weights(self): return sample_uids, sample_weights def get_validator_epoch_length(self): - validator_epoch_length = self.subtensor.validator_epoch_length(self.config.netuid) if self.subtensor.network == 'finney' else self.subtensor.validator_epoch_length + validator_epoch_length = self.subtensor.validator_epoch_length(self.config.netuid) if self.subtensor.network != 'nakamoto' else self.subtensor.validator_epoch_length return validator_epoch_length def get_validator_epochs_per_reset(self): - validator_epochs_per_reset = self.subtensor.validator_epochs_per_reset(self.config.netuid) if self.subtensor.network == 'finney' else self.subtensor.validator_epochs_per_reset + validator_epochs_per_reset = self.subtensor.validator_epochs_per_reset(self.config.netuid) if self.subtensor.network != 'nakamoto' else self.subtensor.validator_epochs_per_reset return validator_epochs_per_reset @@ -788,16 +791,17 @@ def __init__( self, config, device, subtensor, vlogger ): self.config = config self.vlogger = vlogger - if self.config.subtensor.network == 'finney': - self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence(netuid=self.config.netuid) if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence - self.config.nucleus.scaling_law_power = subtensor.scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power - self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power - 
self.max_n = subtensor.max_n(netuid=self.config.netuid) - else: + if self.config.subtensor.network == 'nakamoto': self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence self.config.nucleus.scaling_law_power = subtensor.scaling_law_power if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power self.max_n = subtensor.max_n + else: + # Default to finney + self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence(netuid=self.config.netuid) if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence + self.config.nucleus.scaling_law_power = subtensor.scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power + self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power + self.max_n = subtensor.max_n(netuid=self.config.netuid) self.device = device self.permute_uids = [] # iterable of next UIDs to query, reset to permuted UIDs when empty diff --git a/bittensor/_wallet/__init__.py b/bittensor/_wallet/__init__.py index 69bc11e4d1..a96b4cb618 100644 --- a/bittensor/_wallet/__init__.py +++ b/bittensor/_wallet/__init__.py @@ -79,15 +79,16 @@ def __new__( ) network = config.get('subtensor.network', bittensor.defaults.subtensor.network) - if network == 'finney': - return wallet_impl.Wallet( + if network == 'nakamoto': + return naka_wallet( name = config.wallet.get('name', bittensor.defaults.wallet.name), hotkey = config.wallet.get('hotkey', 
bittensor.defaults.wallet.hotkey), path = config.wallet.path, config = config ) - elif network == 'nakamoto': - return naka_wallet( + else: + # Default to finney. + return wallet_impl.Wallet( name = config.wallet.get('name', bittensor.defaults.wallet.name), hotkey = config.wallet.get('hotkey', bittensor.defaults.wallet.hotkey), path = config.wallet.path,