diff --git a/CHANGELOG.md b/CHANGELOG.md index 83254fbd3d..0e606fd03c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,48 @@ # Changelog +## 6.2.0 / 2023-10-30 + +## What's Changed +* (un)Staking multiple avoid tx limit by @camfairchild in https://github.com/opentensor/bittensor/pull/1244 +* additional logging for prometheus by @Eugene-hu in https://github.com/opentensor/bittensor/pull/1246 +* Dataset fix by @isabella618033 in https://github.com/opentensor/bittensor/pull/1249 +* Grab delegates details from GitHub by @camfairchild in https://github.com/opentensor/bittensor/pull/1245 +* Add raw spec for local test and new bins by @camfairchild in https://github.com/opentensor/bittensor/pull/1243 +* Fix list_delegates on non-archive nodes by @camfairchild in https://github.com/opentensor/bittensor/pull/1232 +* Blacklist fixes + depreciation of old signatures by @Eugene-hu in https://github.com/opentensor/bittensor/pull/1240 +* [BIT-636] Change u16 weight normalization to max-upscaling by @opentaco in https://github.com/opentensor/bittensor/pull/1241 +* remove duplicate command #1228 by @camfairchild in https://github.com/opentensor/bittensor/pull/1231 +* test_forward_priority_2nd_request_timeout fix by @isabella618033 in https://github.com/opentensor/bittensor/pull/1276 +* Remove btcli query and btcli set_weights by @camfairchild in https://github.com/opentensor/bittensor/pull/1144 +* Merge releases 4.0.0 and 4.0.1 back to staging by @camfairchild in https://github.com/opentensor/bittensor/pull/1306 +* Improve development workflow documentation by @quac88 in https://github.com/opentensor/bittensor/pull/1262 +* staging updates and fixes by @ifrit98 in https://github.com/opentensor/bittensor/pull/1540 +* Add root get_weights command to btcli by @Rubberbandits in https://github.com/opentensor/bittensor/pull/1536 +* Fix typo by @steffencruz in https://github.com/opentensor/bittensor/pull/1543 +* remove duplicated debug message in dendrite by @ifrit98 in 
https://github.com/opentensor/bittensor/pull/1544 +* Cli fix by @ifrit98 in https://github.com/opentensor/bittensor/pull/1541 +* update faucet helpstr by @ifrit98 in https://github.com/opentensor/bittensor/pull/1542 +* Added mechanism to sum all delegated tao by @shibshib in https://github.com/opentensor/bittensor/pull/1547 +* Dict hash fix by @ifrit98 in https://github.com/opentensor/bittensor/pull/1548 +* Release/6.1.0 by @ifrit98 in https://github.com/opentensor/bittensor/pull/1550 +* Merge master by @ifrit98 in https://github.com/opentensor/bittensor/pull/1552 +* Streaming fix by @ifrit98 in https://github.com/opentensor/bittensor/pull/1551 +* Fix typos by @omahs in https://github.com/opentensor/bittensor/pull/1553 +* Normalize weights in r get weights table by @camfairchild in https://github.com/opentensor/bittensor/pull/1556 +* Dendrite & Synapse updates and fixes by @ifrit98 in https://github.com/opentensor/bittensor/pull/1555 +* rm root flag in metagraph by @ifrit98 in https://github.com/opentensor/bittensor/pull/1558 +* Max Faucet Runs == 3 by @ifrit98 in https://github.com/opentensor/bittensor/pull/1560 +* replace unknown wallet params (chain mismatch) with key values by @ifrit98 in https://github.com/opentensor/bittensor/pull/1559 +* Remove PoW registration cli and associated extrinsic by @ifrit98 in https://github.com/opentensor/bittensor/pull/1557 +* Add btcli wallet balance by @ifrit98 in https://github.com/opentensor/bittensor/pull/1564 +* Dendrite fixes by @ifrit98 in https://github.com/opentensor/bittensor/pull/1561 + +## New Contributors +* @omahs made their first contribution in https://github.com/opentensor/bittensor/pull/1553 + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v6.0.1...v6.2.0 + + ## 6.1.0 / 2023-10-17 ## What's Changed diff --git a/README.md b/README.md index c1552a29c2..5825b95a7d 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Bittensor is a mining network, similar to Bitcoin, that includes 
built-in incentives designed to encourage computers to provide access to machine learning models in an efficient and censorship-resistant manner. These models can be queried by users seeking outputs from the network, for instance; generating text, audio, and images, or for extracting numerical representations of these input types. Under the hood, Bittensor’s *economic market*, is facilitated by a blockchain token mechanism, through which producers (***miners***) and the verification of the work done by those miners (***validators***) are rewarded. Miners host, train or otherwise procure machine learning systems into the network as a means of fulfilling the verification problems defined by the validators, like the ability to generate responses from prompts i.e. “What is the capital of Texas?. -The token based mechanism under which the miners are incentivized ensures that they are constantly driven to make their knowledge output more useful, in terms of speed, intelligence and diversity. The value generated by the network is distributed directly to the individuals producing that value, without intermediarias. Anyone can participate in this endeavour, extract value from the network, and govern Bittensor. The network is open to all participants, and no individual or group has full control over what is learned, who can profit from it, or who can access it. +The token based mechanism under which the miners are incentivized ensures that they are constantly driven to make their knowledge output more useful, in terms of speed, intelligence and diversity. The value generated by the network is distributed directly to the individuals producing that value, without intermediaries. Anyone can participate in this endeavour, extract value from the network, and govern Bittensor. The network is open to all participants, and no individual or group has full control over what is learned, who can profit from it, or who can access it. 
To learn more about Bittensor, please read our [paper](https://bittensor.com/whitepaper). @@ -70,7 +70,7 @@ print (wallet) $ btcli wallet new_coldkey Enter wallet name (default): - IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possesion of this mnemonic can use it to regenerate the key and access your tokens. + IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possession of this mnemonic can use it to regenerate the key and access your tokens. The mnemonic to the new coldkey is: **** *** **** **** ***** **** *** **** **** **** ***** ***** You can use the mnemonic to recreate the key in case it gets lost. The command to use to regenerate the key using this mnemonic is: @@ -80,7 +80,7 @@ $ btcli wallet new_hotkey Enter wallet name (default): d1 Enter hotkey name (default): - IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possesion of this mnemonic can use it to regenerate the key and access your tokens. + IMPORTANT: Store this mnemonic in a secure (preferably offline place), as anyone who has possession of this mnemonic can use it to regenerate the key and access your tokens. The mnemonic to the new hotkey is: **** *** **** **** ***** **** *** **** **** **** ***** ***** You can use the mnemonic to recreate the key in case it gets lost. The command to use to regenerate the key using this mnemonic is: @@ -95,7 +95,7 @@ $ tree ~/.bittensor/ coldkey # You encrypted coldkey. coldkeypub.txt # Your coldkey public address hotkeys/ # The folder containing all of your hotkeys. - default # You unencrypeted hotkey information. + default # You unencrypted hotkey information. ``` Your default wallet ```Wallet (default, default, ~/.bittensor/wallets/)``` is always used unless you specify otherwise. Be sure to store your mnemonics safely. 
If you lose your password to your wallet, or the access to the machine where the wallet is stored, you can always regenerate the coldkey using the mnemonic you saved from above. ```bash @@ -183,18 +183,18 @@ For example: ```bash btcli subnets --help -usage: btcli subnets [-h] {list,metagraph,lock_cost,create,register,recycle_register,hyperparameters} ... +usage: btcli subnets [-h] {list,metagraph,lock_cost,create,register,pow_register,hyperparameters} ... positional arguments: - {list,metagraph,lock_cost,create,register,recycle_register,hyperparameters} + {list,metagraph,lock_cost,create,register,pow_register,hyperparameters} Commands for managing and viewing subnetworks. - list List all subnets on the network + list List all subnets on the network. metagraph View a subnet metagraph information. - lock_cost Return the lock cost to register a subnet + lock_cost Return the lock cost to register a subnet. create Create a new bittensor subnetwork on this chain. register Register a wallet to a network. - recycle_register Register a wallet to a network. - hyperparameters View subnet hyperparameters + pow_register Register a wallet to a network using PoW. + hyperparameters View subnet hyperparameters. 
options: -h, --help show this help message and exit @@ -251,7 +251,7 @@ metagraph.save() metagraph.load() ``` -Synapse: Responsible for defining the protocol definition betwee axon servers and dendrite clients +Synapse: Responsible for defining the protocol definition between axon servers and dendrite clients ```python class Topk( bittensor.Synapse ): topk: int = 2 # Number of "top" elements to select @@ -289,12 +289,12 @@ def verify_my_synapse( synapse: MySyanpse ): # Apply custom verification logic to synapse # Optionally raise Exception -# Define a custom request blacklist fucntion +# Define a custom request blacklist function def blacklist_my_synapse( synapse: MySyanpse ) -> bool: # Apply custom blacklist # return False ( if non blacklisted ) or True ( if blacklisted ) -# Define a custom request priority fucntion +# Define a custom request priority function def prioritize_my_synape( synapse: MySyanpse ) -> float: # Apply custom priority return 1.0 @@ -312,7 +312,7 @@ my_axon.attach( ``` Dendrite: Inheriting from PyTorch's Module class, represents the abstracted implementation of a network client module designed -to send requests to those endpoint to recieve inputs. +to send requests to those endpoints to receive inputs. Example: ```python dendrite_obj = dendrite( wallet = bittensor.wallet() ) diff --git a/VERSION b/VERSION index 358e78e607..4ac4fded49 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -6.1.0 \ No newline at end of file +6.2.0 \ No newline at end of file diff --git a/bittensor/__init__.py b/bittensor/__init__.py index 600e5e8797..3215ce9155 100644 --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -27,7 +27,7 @@ nest_asyncio.apply() # Bittensor code and protocol version. 
-__version__ = "6.1.0" +__version__ = "6.2.0" version_split = __version__.split(".") __version_as_int__ = ( (100 * int(version_split[0])) @@ -87,7 +87,7 @@ def debug(on: bool = True): # Wallet ss58 address length __ss58_address_length__ = 48 -__networks__ = ["local", "finney"] +__networks__ = ["local", "finney", "test"] __finney_entrypoint__ = "wss://entrypoint-finney.opentensor.ai:443" diff --git a/bittensor/cli.py b/bittensor/cli.py index 673fa31419..a7ee579624 100644 --- a/bittensor/cli.py +++ b/bittensor/cli.py @@ -53,8 +53,8 @@ "metagraph": MetagraphCommand, "lock_cost": SubnetLockCostCommand, "create": RegisterSubnetworkCommand, + "pow_register": PowRegisterCommand, "register": RegisterCommand, - "recycle_register": RecycleRegisterCommand, "hyperparameters": SubnetHyperparamsCommand, }, }, @@ -85,7 +85,7 @@ "overview": OverviewCommand, "transfer": TransferCommand, "inspect": InspectCommand, - # "balance": None, + "balance": WalletBalanceCommand, "create": WalletCreateCommand, "new_hotkey": NewHotkeyCommand, "new_coldkey": NewColdkeyCommand, diff --git a/bittensor/commands/__init__.py b/bittensor/commands/__init__.py index b9e7721547..03822f9bf5 100644 --- a/bittensor/commands/__init__.py +++ b/bittensor/commands/__init__.py @@ -21,7 +21,7 @@ { "netuid": 1, "subtensor": {"network": "finney", "chain_endpoint": None, "_mock": False}, - "register": { + "pow_register": { "num_processes": None, "update_interval": 50000, "output_in_place": True, @@ -65,7 +65,7 @@ from .stake import StakeCommand, StakeShow from .unstake import UnStakeCommand from .overview import OverviewCommand -from .register import RegisterCommand, RecycleRegisterCommand, RunFaucetCommand +from .register import PowRegisterCommand, RegisterCommand, RunFaucetCommand from .delegates import ( NominateCommand, ListDelegatesCommand, @@ -81,6 +81,7 @@ RegenHotkeyCommand, UpdateWalletCommand, WalletCreateCommand, + WalletBalanceCommand, ) from .transfer import TransferCommand from .inspect import 
InspectCommand diff --git a/bittensor/commands/overview.py b/bittensor/commands/overview.py index 43e23552d2..733ec81d7c 100644 --- a/bittensor/commands/overview.py +++ b/bittensor/commands/overview.py @@ -121,7 +121,6 @@ def run(cli: "bittensor.cli"): ] = hotkey_wallet all_hotkey_addresses = list(hotkey_coldkey_to_hotkey_wallet.keys()) - with console.status( ":satellite: Syncing with chain: [white]{}[/white] ...".format( cli.config.subtensor.get( @@ -285,7 +284,11 @@ def run(cli: "bittensor.cli"): nn.coldkey, None ) if not hotwallet: - continue + # Indicates a mismatch between what the chain says the coldkey + # is for this hotkey and the local wallet coldkey-hotkey pair + hotwallet = argparse.Namespace() + hotwallet.name = nn.coldkey[:7] + hotwallet.hotkey_str = nn.hotkey[:7] nn: bittensor.NeuronInfoLite uid = nn.uid active = nn.active diff --git a/bittensor/commands/register.py b/bittensor/commands/register.py index 2fc615fd26..32c7f97fc4 100644 --- a/bittensor/commands/register.py +++ b/bittensor/commands/register.py @@ -27,6 +27,88 @@ class RegisterCommand: + @staticmethod + def run(cli): + r"""Register neuron by recycling some TAO.""" + wallet = bittensor.wallet(config=cli.config) + subtensor = bittensor.subtensor(config=cli.config) + + # Verify subnet exists + if not subtensor.subnet_exists(netuid=cli.config.netuid): + bittensor.__console__.print( + f"[red]Subnet {cli.config.netuid} does not exist[/red]" + ) + sys.exit(1) + + # Check current recycle amount + current_recycle = subtensor.burn(netuid=cli.config.netuid) + balance = subtensor.get_balance(address=wallet.coldkeypub.ss58_address) + + # Check balance is sufficient + if balance < current_recycle: + bittensor.__console__.print( + f"[red]Insufficient balance {balance} to register neuron. 
Current recycle is {current_recycle} TAO[/red]" + ) + sys.exit(1) + + if not cli.config.no_prompt: + if ( + Confirm.ask( + f"Your balance is: [bold green]{balance}[/bold green]\nThe cost to register by recycle is [bold red]{current_recycle}[/bold red]\nDo you want to continue?", + default=False, + ) + == False + ): + sys.exit(1) + + subtensor.burned_register( + wallet=wallet, netuid=cli.config.netuid, prompt=not cli.config.no_prompt + ) + + @staticmethod + def add_args(parser: argparse.ArgumentParser): + register_parser = parser.add_parser( + "register", help="""Register a wallet to a network.""" + ) + register_parser.add_argument( + "--netuid", + type=int, + help="netuid for subnet to serve this neuron on", + default=argparse.SUPPRESS, + ) + + bittensor.wallet.add_args(register_parser) + bittensor.subtensor.add_args(register_parser) + + @staticmethod + def check_config(config: "bittensor.config"): + if ( + not config.is_set("subtensor.network") + and not config.is_set("subtensor.chain_endpoint") + and not config.no_prompt + ): + config.subtensor.network = Prompt.ask( + "Enter subtensor network", + choices=bittensor.__networks__, + default=defaults.subtensor.network, + ) + _, endpoint = bittensor.subtensor.determine_chain_endpoint_and_network( + config.subtensor.network + ) + config.subtensor.chain_endpoint = endpoint + + check_netuid_set(config, subtensor=bittensor.subtensor(config=config)) + + if not config.is_set("wallet.name") and not config.no_prompt: + wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) + config.wallet.name = str(wallet_name) + + if not config.is_set("wallet.hotkey") and not config.no_prompt: + hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) + config.wallet.hotkey = str(hotkey) + + +class PowRegisterCommand: @staticmethod def run(cli): r"""Register neuron.""" @@ -44,23 +126,25 @@ def run(cli): wallet=wallet, netuid=cli.config.netuid, prompt=not cli.config.no_prompt, - 
TPB=cli.config.register.cuda.get("TPB", None), - update_interval=cli.config.register.get("update_interval", None), - num_processes=cli.config.register.get("num_processes", None), - cuda=cli.config.register.cuda.get( - "use_cuda", defaults.register.cuda.use_cuda + TPB=cli.config.pow_register.cuda.get("TPB", None), + update_interval=cli.config.pow_register.get("update_interval", None), + num_processes=cli.config.pow_register.get("num_processes", None), + cuda=cli.config.pow_register.cuda.get( + "use_cuda", defaults.pow_register.cuda.use_cuda ), - dev_id=cli.config.register.cuda.get("dev_id", None), - output_in_place=cli.config.register.get( - "output_in_place", defaults.register.output_in_place + dev_id=cli.config.pow_register.cuda.get("dev_id", None), + output_in_place=cli.config.pow_register.get( + "output_in_place", defaults.pow_register.output_in_place + ), + log_verbose=cli.config.pow_register.get( + "verbose", defaults.pow_register.verbose ), - log_verbose=cli.config.register.get("verbose", defaults.register.verbose), ) @staticmethod def add_args(parser: argparse.ArgumentParser): register_parser = parser.add_parser( - "register", help="""Register a wallet to a network.""" + "pow_register", help="""Register a wallet to a network using PoW.""" ) register_parser.add_argument( "--netuid", @@ -69,75 +153,75 @@ def add_args(parser: argparse.ArgumentParser): default=argparse.SUPPRESS, ) register_parser.add_argument( - "--register.num_processes", + "--pow_register.num_processes", "-n", - dest="register.num_processes", + dest="pow_register.num_processes", help="Number of processors to use for POW registration", type=int, - default=defaults.register.num_processes, + default=defaults.pow_register.num_processes, ) register_parser.add_argument( - "--register.update_interval", - "--register.cuda.update_interval", + "--pow_register.update_interval", + "--pow_register.cuda.update_interval", "--cuda.update_interval", "-u", help="The number of nonces to process before checking 
for next block during registration", type=int, - default=defaults.register.update_interval, + default=defaults.pow_register.update_interval, ) register_parser.add_argument( - "--register.no_output_in_place", + "--pow_register.no_output_in_place", "--no_output_in_place", - dest="register.output_in_place", + dest="pow_register.output_in_place", help="Whether to not ouput the registration statistics in-place. Set flag to disable output in-place.", action="store_false", required=False, - default=defaults.register.output_in_place, + default=defaults.pow_register.output_in_place, ) register_parser.add_argument( - "--register.verbose", + "--pow_register.verbose", help="Whether to ouput the registration statistics verbosely.", action="store_true", required=False, - default=defaults.register.verbose, + default=defaults.pow_register.verbose, ) ## Registration args for CUDA registration. register_parser.add_argument( - "--register.cuda.use_cuda", + "--pow_register.cuda.use_cuda", "--cuda", "--cuda.use_cuda", - dest="register.cuda.use_cuda", - default=defaults.register.cuda.use_cuda, + dest="pow_register.cuda.use_cuda", + default=defaults.pow_register.cuda.use_cuda, help="""Set flag to use CUDA to register.""", action="store_true", required=False, ) register_parser.add_argument( - "--register.cuda.no_cuda", + "--pow_register.cuda.no_cuda", "--no_cuda", "--cuda.no_cuda", - dest="register.cuda.use_cuda", - default=not defaults.register.cuda.use_cuda, + dest="pow_register.cuda.use_cuda", + default=not defaults.pow_register.cuda.use_cuda, help="""Set flag to not use CUDA for registration""", action="store_false", required=False, ) register_parser.add_argument( - "--register.cuda.dev_id", + "--pow_register.cuda.dev_id", "--cuda.dev_id", type=int, nargs="+", - default=defaults.register.cuda.dev_id, + default=defaults.pow_register.cuda.dev_id, help="""Set the CUDA device id(s). Goes by the order of speed. (i.e. 
0 is the fastest).""", required=False, ) register_parser.add_argument( - "--register.cuda.TPB", + "--pow_register.cuda.TPB", "--cuda.TPB", type=int, - default=defaults.register.cuda.TPB, + default=defaults.pow_register.cuda.TPB, help="""Set the number of Threads Per Block for CUDA.""", required=False, ) @@ -145,76 +229,6 @@ def add_args(parser: argparse.ArgumentParser): bittensor.wallet.add_args(register_parser) bittensor.subtensor.add_args(register_parser) - @staticmethod - def check_config(config: "bittensor.config"): - check_netuid_set(config, subtensor=bittensor.subtensor(config=config)) - - if not config.is_set("wallet.name") and not config.no_prompt: - wallet_name = Prompt.ask("Enter wallet name", default=defaults.wallet.name) - config.wallet.name = str(wallet_name) - - if not config.is_set("wallet.hotkey") and not config.no_prompt: - hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) - config.wallet.hotkey = str(hotkey) - - if not config.no_prompt: - check_for_cuda_reg_config(config) - - -class RecycleRegisterCommand: - @staticmethod - def run(cli): - r"""Register neuron by recycling some TAO.""" - wallet = bittensor.wallet(config=cli.config) - subtensor = bittensor.subtensor(config=cli.config) - - # Verify subnet exists - if not subtensor.subnet_exists(netuid=cli.config.netuid): - bittensor.__console__.print( - f"[red]Subnet {cli.config.netuid} does not exist[/red]" - ) - sys.exit(1) - - # Check current recycle amount - current_recycle = subtensor.burn(netuid=cli.config.netuid) - balance = subtensor.get_balance(address=wallet.coldkeypub.ss58_address) - - # Check balance is sufficient - if balance < current_recycle: - bittensor.__console__.print( - f"[red]Insufficient balance {balance} to register neuron. 
Current recycle is {current_recycle} TAO[/red]" - ) - sys.exit(1) - - if not cli.config.no_prompt: - if ( - Confirm.ask( - f"Your balance is: [bold green]{balance}[/bold green]\nThe cost to register by recycle is [bold red]{current_recycle}[/bold red]\nDo you want to continue?", - default=False, - ) - == False - ): - sys.exit(1) - - subtensor.burned_register( - wallet=wallet, netuid=cli.config.netuid, prompt=not cli.config.no_prompt - ) - - @staticmethod - def add_args(parser: argparse.ArgumentParser): - recycle_register_parser = parser.add_parser( - "recycle_register", help="""Register a wallet to a network.""" - ) - recycle_register_parser.add_argument( - "--netuid", - type=int, - help="netuid for subnet to serve this neuron on", - default=argparse.SUPPRESS, - ) - - bittensor.wallet.add_args(recycle_register_parser) - bittensor.subtensor.add_args(recycle_register_parser) - @staticmethod def check_config(config: "bittensor.config"): if ( @@ -227,6 +241,10 @@ def check_config(config: "bittensor.config"): choices=bittensor.__networks__, default=defaults.subtensor.network, ) + _, endpoint = bittensor.subtensor.determine_chain_endpoint_and_network( + config.subtensor.network + ) + config.subtensor.chain_endpoint = endpoint check_netuid_set(config, subtensor=bittensor.subtensor(config=config)) @@ -238,6 +256,9 @@ def check_config(config: "bittensor.config"): hotkey = Prompt.ask("Enter hotkey name", default=defaults.wallet.hotkey) config.wallet.hotkey = str(hotkey) + if not config.no_prompt: + check_for_cuda_reg_config(config) + class RunFaucetCommand: @staticmethod @@ -248,17 +269,19 @@ def run(cli): subtensor.run_faucet( wallet=wallet, prompt=not cli.config.no_prompt, - TPB=cli.config.register.cuda.get("TPB", None), - update_interval=cli.config.register.get("update_interval", None), - num_processes=cli.config.register.get("num_processes", None), - cuda=cli.config.register.cuda.get( - "use_cuda", defaults.register.cuda.use_cuda + 
TPB=cli.config.pow_register.cuda.get("TPB", None), + update_interval=cli.config.pow_register.get("update_interval", None), + num_processes=cli.config.pow_register.get("num_processes", None), + cuda=cli.config.pow_register.cuda.get( + "use_cuda", defaults.pow_register.cuda.use_cuda ), - dev_id=cli.config.register.cuda.get("dev_id", None), - output_in_place=cli.config.register.get( - "output_in_place", defaults.register.output_in_place + dev_id=cli.config.pow_register.cuda.get("dev_id", None), + output_in_place=cli.config.pow_register.get( + "output_in_place", defaults.pow_register.output_in_place + ), + log_verbose=cli.config.pow_register.get( + "verbose", defaults.pow_register.verbose ), - log_verbose=cli.config.register.get("verbose", defaults.register.verbose), ) @staticmethod @@ -267,74 +290,74 @@ def add_args(parser: argparse.ArgumentParser): "faucet", help="""Perform PoW to receieve test TAO in your wallet.""" ) run_faucet_parser.add_argument( - "--register.num_processes", + "--faucet.num_processes", "-n", - dest="register.num_processes", + dest="pow_register.num_processes", help="Number of processors to use for POW registration", type=int, - default=defaults.register.num_processes, + default=defaults.pow_register.num_processes, ) run_faucet_parser.add_argument( - "--register.update_interval", - "--register.cuda.update_interval", + "--faucet.update_interval", + "--faucet.cuda.update_interval", "--cuda.update_interval", "-u", help="The number of nonces to process before checking for next block during registration", type=int, - default=defaults.register.update_interval, + default=defaults.pow_register.update_interval, ) run_faucet_parser.add_argument( - "--register.no_output_in_place", + "--faucet.no_output_in_place", "--no_output_in_place", - dest="register.output_in_place", + dest="pow_register.output_in_place", help="Whether to not ouput the registration statistics in-place. 
Set flag to disable output in-place.", action="store_false", required=False, - default=defaults.register.output_in_place, + default=defaults.pow_register.output_in_place, ) run_faucet_parser.add_argument( - "--register.verbose", + "--faucet.verbose", help="Whether to ouput the registration statistics verbosely.", action="store_true", required=False, - default=defaults.register.verbose, + default=defaults.pow_register.verbose, ) ## Registration args for CUDA registration. run_faucet_parser.add_argument( - "--register.cuda.use_cuda", + "--faucet.cuda.use_cuda", "--cuda", "--cuda.use_cuda", - dest="register.cuda.use_cuda", - default=defaults.register.cuda.use_cuda, - help="""Set flag to use CUDA to register.""", + dest="pow_register.cuda.use_cuda", + default=defaults.pow_register.cuda.use_cuda, + help="""Set flag to use CUDA to pow_register.""", action="store_true", required=False, ) run_faucet_parser.add_argument( - "--register.cuda.no_cuda", + "--faucet.cuda.no_cuda", "--no_cuda", "--cuda.no_cuda", - dest="register.cuda.use_cuda", - default=not defaults.register.cuda.use_cuda, + dest="pow_register.cuda.use_cuda", + default=not defaults.pow_register.cuda.use_cuda, help="""Set flag to not use CUDA for registration""", action="store_false", required=False, ) run_faucet_parser.add_argument( - "--register.cuda.dev_id", + "--faucet.cuda.dev_id", "--cuda.dev_id", type=int, nargs="+", - default=defaults.register.cuda.dev_id, + default=defaults.pow_register.cuda.dev_id, help="""Set the CUDA device id(s). Goes by the order of speed. (i.e. 
0 is the fastest).""", required=False, ) run_faucet_parser.add_argument( - "--register.cuda.TPB", + "--faucet.cuda.TPB", "--cuda.TPB", type=int, - default=defaults.register.cuda.TPB, + default=defaults.pow_register.cuda.TPB, help="""Set the number of Threads Per Block for CUDA.""", required=False, ) diff --git a/bittensor/commands/root.py b/bittensor/commands/root.py index 0886a032de..78c31fa3d7 100644 --- a/bittensor/commands/root.py +++ b/bittensor/commands/root.py @@ -20,6 +20,7 @@ import torch import typing import argparse +import numpy as np import bittensor from typing import List, Optional, Dict from rich.prompt import Prompt, Confirm @@ -222,36 +223,58 @@ def run(cli): table = Table(show_footer=False) table.title = "[white]Root Network Weights" table.add_column( - "[overline white]UID", + "[white]UID", + header_style="overline white", footer_style="overline white", style="rgb(50,163,219)", no_wrap=True, ) - table.add_column( - "[overline white]NETUID", - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.add_column( - "[overline white]WEIGHT", - footer_style="overline white", - justify="right", - style="green", - no_wrap=True, - ) - table.show_footer = True + uid_to_weights = {} + netuids = set() for matrix in weights: - uid = matrix[0] - for weight_data in matrix[1]: - table.add_row( - str(uid), - str(weight_data[0]), - "{:0.2f}%".format((weight_data[1] / 65535) * 100), + [uid, weights_data] = matrix + + if not len(weights_data): + uid_to_weights[uid] = {} + normalized_weights = [] + else: + normalized_weights = np.array(weights_data)[:, 1] / max( + np.sum(weights_data, axis=0)[1], 1 ) + for weight_data, normalized_weight in zip(weights_data, normalized_weights): + [netuid, _] = weight_data + netuids.add(netuid) + if uid not in uid_to_weights: + uid_to_weights[uid] = {} + + uid_to_weights[uid][netuid] = normalized_weight + + for netuid in netuids: + table.add_column( + f"[white]{netuid}", + header_style="overline 
white", + footer_style="overline white", + justify="right", + style="green", + no_wrap=True, + ) + + for uid in uid_to_weights: + row = [str(uid)] + + uid_weights = uid_to_weights[uid] + for netuid in netuids: + if netuid in uid_weights: + normalized_weight = uid_weights[netuid] + row.append("{:0.2f}%".format(normalized_weight * 100)) + else: + row.append("-") + table.add_row(*row) + + table.show_footer = True + table.box = None table.pad_edge = False table.width = None diff --git a/bittensor/commands/utils.py b/bittensor/commands/utils.py index b54053622a..732d43d68a 100644 --- a/bittensor/commands/utils.py +++ b/bittensor/commands/utils.py @@ -80,15 +80,15 @@ def check_for_cuda_reg_config(config: "bittensor.config") -> None: """Checks, when CUDA is available, if the user would like to register with their CUDA device.""" if torch.cuda.is_available(): if not config.no_prompt: - if config.register.cuda.get("use_cuda") == None: # flag not set + if config.pow_register.cuda.get("use_cuda") == None: # flag not set # Ask about cuda registration only if a CUDA device is available. cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n") - config.register.cuda.use_cuda = cuda + config.pow_register.cuda.use_cuda = cuda # Only ask about which CUDA device if the user has more than one CUDA device. if ( - config.register.cuda.use_cuda - and config.register.cuda.get("dev_id") is None + config.pow_register.cuda.use_cuda + and config.pow_register.cuda.get("dev_id") is None ): devices: List[str] = [str(x) for x in range(torch.cuda.device_count())] device_names: List[str] = [ @@ -122,11 +122,11 @@ def check_for_cuda_reg_config(config: "bittensor.config") -> None: ) ) sys.exit(1) - config.register.cuda.dev_id = dev_id + config.pow_register.cuda.dev_id = dev_id else: # flag was not set, use default value. 
- if config.register.cuda.get("use_cuda") is None: - config.register.cuda.use_cuda = defaults.register.cuda.use_cuda + if config.pow_register.cuda.get("use_cuda") is None: + config.pow_register.cuda.use_cuda = defaults.pow_register.cuda.use_cuda def get_hotkey_wallets_for_wallet(wallet) -> List["bittensor.wallet"]: diff --git a/bittensor/commands/wallets.py b/bittensor/commands/wallets.py index 5417aa5496..9e892b5524 100644 --- a/bittensor/commands/wallets.py +++ b/bittensor/commands/wallets.py @@ -20,6 +20,7 @@ import os import sys from rich.prompt import Prompt, Confirm +from rich.table import Table from typing import Optional, List from . import defaults @@ -527,3 +528,129 @@ def check_config(config: "bittensor.Config"): "Enter wallet name", default=bittensor.defaults.wallet.name ) config.wallet.name = str(wallet_name) + + +def _get_coldkey_ss58_addresses_for_path(path: str) -> List[str]: + """Get all coldkey ss58 addresses from path.""" + + def list_coldkeypub_files(dir_path): + abspath = os.path.abspath(os.path.expanduser(dir_path)) + coldkey_files = [] + + for file in os.listdir(abspath): + coldkey_path = os.path.join(abspath, file, "coldkeypub.txt") + if os.path.exists(coldkey_path): + coldkey_files.append(coldkey_path) + else: + bittensor.logging.warning( + f"{coldkey_path} does not exist. Excluding..." 
+ ) + return coldkey_files + + return [ + bittensor.keyfile(file).keypair.ss58_address + for file in list_coldkeypub_files(path) + ] + + +class WalletBalanceCommand: + @staticmethod + def run(cli): + """Check the balance of the wallet.""" + wallet_names = os.listdir(os.path.expanduser(cli.config.wallet.path)) + coldkeys = _get_coldkey_ss58_addresses_for_path(cli.config.wallet.path) + subtensor = bittensor.subtensor(config=cli.config) + + free_balances = [ + subtensor.get_balance(coldkeys[i]) for i in range(len(coldkeys)) + ] + staked_balances = [ + subtensor.get_total_stake_for_coldkey(coldkeys[i]) + for i in range(len(coldkeys)) + ] + total_free_balance = sum(free_balances) + total_staked_balance = sum(staked_balances) + + balances = { + name: (coldkey, free, staked) + for name, coldkey, free, staked in sorted( + zip(wallet_names, coldkeys, free_balances, staked_balances) + ) + } + + table = Table(show_footer=False) + table.title = "[white]Wallet Coldkey Balances" + table.add_column( + "[white]Wallet Name", + header_style="overline white", + footer_style="overline white", + style="rgb(50,163,219)", + no_wrap=True, + ) + + table.add_column( + "[white]Coldkey Address", + header_style="overline white", + footer_style="overline white", + style="rgb(50,163,219)", + no_wrap=True, + ) + + for typestr in ["Free", "Staked", "Total"]: + table.add_column( + f"[white]{typestr} Balance", + header_style="overline white", + footer_style="overline white", + justify="right", + style="green", + no_wrap=True, + ) + + for name, (coldkey, free, staked) in balances.items(): + table.add_row( + name, + coldkey, + str(free), + str(staked), + str(free + staked), + ) + table.add_row() + table.add_row( + "Total Balance Across All Coldkeys", + "", + str(total_free_balance), + str(total_staked_balance), + str(total_free_balance + total_staked_balance), + ) + table.show_footer = True + + table.box = None + table.pad_edge = False + table.width = None + bittensor.__console__.print(table) + + 
@staticmethod + def add_args(parser: argparse.ArgumentParser): + balance_parser = parser.add_parser( + "balance", help="""Checks the balance of the wallet.""" + ) + bittensor.wallet.add_args(balance_parser) + bittensor.subtensor.add_args(balance_parser) + + @staticmethod + def check_config(config: "bittensor.config"): + if not config.is_set("wallet.path") and not config.no_prompt: + path = Prompt.ask("Enter wallets path", default=defaults.wallet.path) + config.wallet.path = str(path) + + if not config.is_set("subtensor.network") and not config.no_prompt: + network = Prompt.ask( + "Enter network", + default=defaults.subtensor.network, + choices=bittensor.__networks__, + ) + config.subtensor.network = str(network) + ( + _, + config.subtensor.chain_endpoint, + ) = bittensor.subtensor.determine_chain_endpoint_and_network(str(network)) diff --git a/bittensor/dendrite.py b/bittensor/dendrite.py index 9e1cd0a5d9..a99b28e300 100644 --- a/bittensor/dendrite.py +++ b/bittensor/dendrite.py @@ -26,7 +26,7 @@ import aiohttp import bittensor from fastapi import Response -from typing import Union, Optional, List, Union +from typing import Union, Optional, List, Union, AsyncGenerator, Any class dendrite(torch.nn.Module): @@ -42,15 +42,52 @@ class dendrite(torch.nn.Module): Attributes: keypair: The wallet or keypair used for signing messages. + external_ip (str): The external IP address of the local system. + synapse_history (list): A list of Synapse objects representing the historical responses. Methods: __str__(): Returns a string representation of the Dendrite object. __repr__(): Returns a string representation of the Dendrite object, acting as a fallback for __str__(). + query(self, *args, **kwargs) -> Union[bittensor.Synapse, List[bittensor.Synapse]]: + Makes synchronous requests to one or multiple target Axons and returns responses. 
- Example: - >>> dendrite_obj = dendrite(wallet = bittensor.wallet() ) - >>> print(dendrite_obj) + forward(self, axons, synapse=bittensor.Synapse(), timeout=12, deserialize=True, run_async=True, streaming=False) -> bittensor.Synapse: + Asynchronously sends requests to one or multiple Axons and collates their responses. + + call(self, target_axon, synapse=bittensor.Synapse(), timeout=12.0, deserialize=True) -> bittensor.Synapse: + Asynchronously sends a request to a specified Axon and processes the response. + + call_stream(self, target_axon, synapse=bittensor.Synapse(), timeout=12.0, deserialize=True) -> AsyncGenerator[bittensor.Synapse, None]: + Sends a request to a specified Axon and yields an AsyncGenerator that contains streaming + response chunks before finally yielding the filled Synapse as the final element. + + preprocess_synapse_for_request(self, target_axon_info, synapse, timeout=12.0) -> bittensor.Synapse: + Preprocesses the synapse for making a request, including building headers and signing. + + process_server_response(self, server_response, json_response, local_synapse): + Processes the server response, updates the local synapse state, and merges headers. + + close_session(self): + Synchronously closes the internal aiohttp client session. + + aclose_session(self): + Asynchronously closes the internal aiohttp client session. + + NOTE: When working with async aiohttp client sessions, it is recommended to use a context manager. + + Example with a context manager: + >>> async with dendrite(wallet = bittensor.wallet()) as d: + >>> print(d) + >>> d( ) # ping axon + >>> d( [] ) # ping multiple + >>> d( bittensor.axon(), bittensor.Synapse ) + + However, you are able to safely call dendrite.query() without a context manager in a synchronous setting. 
+ + Example without a context manager: + >>> d = dendrite(wallet = bittensor.wallet() ) + >>> print(d) >>> d( ) # ping axon >>> d( [] ) # ping multiple >>> d( bittensor.axon(), bittensor.Synapse ) @@ -87,21 +124,94 @@ def __init__( @property async def session(self) -> aiohttp.ClientSession: + """ + Asynchronous property that provides access to the internal aiohttp client session. + + If the session is not already initialized, this property will instantiate a new + aiohttp.ClientSession and return it. + + Returns: + aiohttp.ClientSession: The aiohttp client session instance. + """ if self._session is None: self._session = aiohttp.ClientSession() return self._session - async def close_session(self): + def close_session(self): + """ + Closes the internal aiohttp client session in a synchronous manner. + + This method ensures that the resources tied with the aiohttp client session are released. + It should be called when the session is no longer needed, typically during the cleanup phase. + + Usage: + dendrite_instance.close_session() + """ + if self._session: + loop = asyncio.get_event_loop() + loop.run_until_complete(self._session.close()) + self._session = None + + async def aclose_session(self): + """ + Asynchronously closes the internal aiohttp client session. + + Similar to the synchronous `close_session` method but designed to be used within + asynchronous contexts. This method ensures that all related resources are released. 
+ + Usage: + await dendrite_instance.aclose_session() + """ if self._session: await self._session.close() self._session = None + def _get_endpoint_url(self, target_axon, request_name): + endpoint = ( + f"0.0.0.0:{str(target_axon.port)}" + if target_axon.ip == str(self.external_ip) + else f"{target_axon.ip}:{str(target_axon.port)}" + ) + return f"http://{endpoint}/{request_name}" + + def _handle_request_errors(self, synapse, request_name, exception): + if isinstance(exception, aiohttp.ClientConnectorError): + synapse.dendrite.status_code = "503" + synapse.dendrite.status_message = f"Service at {synapse.axon.ip}:{str(synapse.axon.port)}/{request_name} unavailable." + elif isinstance(exception, asyncio.TimeoutError): + synapse.dendrite.status_code = "408" + synapse.dendrite.status_message = ( + f"Timedout after {synapse.timeout} seconds." + ) + else: + synapse.dendrite.status_code = "422" + synapse.dendrite.status_message = ( + f"Failed to parse response object with error: {str(exception)}" + ) + + def _log_outgoing_request(self, synapse): + bittensor.logging.debug( + f"dendrite | --> | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | 0 | Success" + ) + + def _log_incoming_response(self, synapse): + bittensor.logging.debug( + f"dendrite | <-- | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | {synapse.dendrite.status_code} | {synapse.dendrite.status_message}" + ) + def query( self, *args, **kwargs - ) -> Union[bittensor.Synapse, List[bittensor.Synapse]]: + ) -> Union[ + bittensor.Synapse, + List[bittensor.Synapse], + bittensor.StreamingSynapse, + List[bittensor.StreamingSynapse], + ]: """ Makes a synchronous request to multiple target Axons and returns the server responses. + Cleanup is automatically handled and sessions are closed upon completed requests. 
+ Args: axons (Union[List[Union['bittensor.AxonInfo', 'bittensor.axon']], Union['bittensor.AxonInfo', 'bittensor.axon']]): The list of target Axon information. @@ -115,13 +225,16 @@ def query( """ try: loop = asyncio.get_event_loop() - return loop.run_until_complete(self.forward(*args, **kwargs)) + result = loop.run_until_complete(self.forward(*args, **kwargs)) except: new_loop = asyncio.new_event_loop() asyncio.set_event_loop(new_loop) result = loop.run_until_complete(self.forward(*args, **kwargs)) new_loop.close() return result + finally: + self.close_session() + return result async def forward( self, @@ -133,21 +246,39 @@ async def forward( timeout: float = 12, deserialize: bool = True, run_async: bool = True, - ) -> bittensor.Synapse: + streaming: bool = False, + ) -> List[Union[AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse]]: """ - Makes asynchronous requests to multiple target Axons and returns the server responses. + Asynchronously sends requests to one or multiple Axons and collates their responses. + + This function acts as a bridge for sending multiple requests concurrently or sequentially + based on the provided parameters. It checks the type of the target Axons, preprocesses + the requests, and then sends them off. After getting the responses, it processes and + collates them into a unified format. + + When querying an Axon that sends back data in chunks using the Dendrite, this function + returns an AsyncGenerator that yields each chunk as it is received. The generator can be + iterated over to process each chunk individually. + + For example: + >>> ... + >>> dendrite = bittensor.dendrite(wallet = wallet) + >>> async for chunk in dendrite.forward(axons, synapse, timeout, deserialize, run_async, streaming): + >>> # Process each chunk here + >>> print(chunk) Args: axons (Union[List[Union['bittensor.AxonInfo', 'bittensor.axon']], Union['bittensor.AxonInfo', 'bittensor.axon']]): - The list of target Axon information. 
- synapse (bittensor.Synapse, optional): The Synapse object. Defaults to bittensor.Synapse(). - timeout (float, optional): The request timeout duration in seconds. - Defaults to 12.0 seconds. + The target Axons to send requests to. Can be a single Axon or a list of Axons. + synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new bittensor.Synapse instance. + timeout (float, optional): Maximum duration to wait for a response from an Axon in seconds. Defaults to 12.0. + deserialize (bool, optional): Determines if the received response should be deserialized. Defaults to True. + run_async (bool, optional): If True, sends requests concurrently. Otherwise, sends requests sequentially. Defaults to True. + streaming (bool, optional): Indicates if the response is expected to be in streaming format. Defaults to False. Returns: - Union[bittensor.Synapse, List[bittensor.Synapse]]: If a single target axon is provided, - returns the response from that axon. If multiple target axons are provided, - returns a list of responses from all target axons. + Union[AsyncGenerator, bittensor.Synapse, List[bittensor.Synapse]]: If a single Axon is targeted, returns its response. + If multiple Axons are targeted, returns a list of their responses. """ is_list = True # If a single axon is provided, wrap it in a list for uniform processing @@ -155,44 +286,72 @@ async def forward( is_list = False axons = [axons] - # This asynchronous function is used to send queries to all axons. - async def query_all_axons() -> List[bittensor.Synapse]: - # If the 'run_async' flag is not set, the code runs synchronously. - if not run_async: - # Create an empty list to hold the responses from all axons. - all_responses = [] - # Loop through each axon in the 'axons' list. - for target_axon in axons: - # The response from each axon is then appended to the 'all_responses' list. 
- all_responses.append( - await self.call( - target_axon=target_axon, - synapse=synapse.copy(), - timeout=timeout, - deserialize=deserialize, - ) + # Check if synapse is an instance of the StreamingSynapse class or if streaming flag is set. + is_streaming_subclass = issubclass( + synapse.__class__, bittensor.StreamingSynapse + ) + if streaming != is_streaming_subclass: + bittensor.logging.warning( + f"Argument streaming is {streaming} while issubclass(synapse, StreamingSynapse) is {synapse.__class__.__name__}. This may cause unexpected behavior." + ) + streaming = is_streaming_subclass or streaming + + async def query_all_axons( + is_stream: bool, + ) -> Union[AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse]: + """ + Handles requests for all axons, either in streaming or non-streaming mode. + + Args: + is_stream: If True, handles the axons in streaming mode. + + Returns: + List of Synapse objects with responses. + """ + + async def single_axon_response( + target_axon, + ) -> Union[ + AsyncGenerator[Any, Any], bittensor.Synapse, bittensor.StreamingSynapse + ]: + """ + Retrieve response for a single axon, either in streaming or non-streaming mode. + + Args: + target_axon: The target axon to send request to. + + Returns: + A Synapse object with the response. + """ + if is_stream: + # If in streaming mode, return the async_generator + return self.call_stream( + target_axon=target_axon, + synapse=synapse.copy(), + timeout=timeout, + deserialize=deserialize, ) - # The function then returns a list of responses from all axons. - return all_responses - else: - # Here we build a list of coroutines without awaiting them. - coroutines = [ - self.call( + else: + # If not in streaming mode, simply call the axon and get the response. 
+ return await self.call( target_axon=target_axon, synapse=synapse.copy(), timeout=timeout, deserialize=deserialize, ) - for target_axon in axons - ] - # 'asyncio.gather' is a method which takes multiple coroutines and runs them in parallel. - all_responses = await asyncio.gather(*coroutines) - # The function then returns a list of responses from all axons. - return all_responses - # Run all requests concurrently and get the responses - responses = await query_all_axons() + # If run_async flag is False, get responses one by one. + if not run_async: + return [ + await single_axon_response(target_axon) for target_axon in axons + ] + # If run_async flag is True, get responses concurrently using asyncio.gather(). + return await asyncio.gather( + *(single_axon_response(target_axon) for target_axon in axons) + ) + # Get responses for all axons. + responses = await query_all_axons(streaming) # Return the single response if only one axon was targeted, else return all responses if len(responses) == 1 and not is_list: return responses[0] @@ -207,19 +366,97 @@ async def call( deserialize: bool = True, ) -> bittensor.Synapse: """ - Makes an asynchronous request to the target Axon, processes the server - response and returns the updated Synapse. + Asynchronously sends a request to a specified Axon and processes the response. + + This function establishes a connection with a specified Axon, sends the encapsulated + data through the Synapse object, waits for a response, processes it, and then + returns the updated Synapse object. Args: - target_axon (Union['bittensor.AxonInfo', 'bittensor.axon']): The target Axon information. - synapse (bittensor.Synapse, optional): The Synapse object. Defaults to bittensor.Synapse(). - timeout (float, optional): The request timeout duration in seconds. - Defaults to 12.0 seconds. - deserialize (bool, optional): Whether to deserialize the returned Synapse. - Defaults to True. 
+ target_axon (Union['bittensor.AxonInfo', 'bittensor.axon']): The target Axon to send the request to. + synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new bittensor.Synapse instance. + timeout (float, optional): Maximum duration to wait for a response from the Axon in seconds. Defaults to 12.0. + deserialize (bool, optional): Determines if the received response should be deserialized. Defaults to True. Returns: - bittensor.Synapse: The updated Synapse object after processing server response. + bittensor.Synapse: The Synapse object, updated with the response data from the Axon. + """ + + # Record start time + start_time = time.time() + target_axon = ( + target_axon.info() + if isinstance(target_axon, bittensor.axon) + else target_axon + ) + + # Build request endpoint from the synapse class + request_name = synapse.__class__.__name__ + url = self._get_endpoint_url(target_axon, request_name=request_name) + + # Preprocess synapse for making a request + synapse = self.preprocess_synapse_for_request(target_axon, synapse, timeout) + + try: + # Log outgoing request + self._log_outgoing_request(synapse) + + # Make the HTTP POST request + async with (await self.session).post( + url, + headers=synapse.to_headers(), + json=synapse.dict(), + timeout=timeout, + ) as response: + # Extract the JSON response from the server + json_response = await response.json() + # Process the server response and fill synapse + self.process_server_response(response, json_response, synapse) + + # Set process time and log the response + synapse.dendrite.process_time = str(time.time() - start_time) + + except Exception as e: + self._handle_request_errors(synapse, request_name, e) + + finally: + self._log_incoming_response(synapse) + + # Log synapse event history + self.synapse_history.append( + bittensor.Synapse.from_headers(synapse.to_headers()) + ) + + # Return the updated synapse object after deserializing if requested + if deserialize: + return 
synapse.deserialize() + else: + return synapse + + async def call_stream( + self, + target_axon: Union[bittensor.AxonInfo, bittensor.axon], + synapse: bittensor.Synapse = bittensor.Synapse(), + timeout: float = 12.0, + deserialize: bool = True, + ) -> AsyncGenerator[Any, Any]: + """ + Sends a request to a specified Axon and yields streaming responses. + + Similar to `call`, but designed for scenarios where the Axon sends back data in + multiple chunks or streams. The function yields each chunk as it is received. This is + useful for processing large responses piece by piece without waiting for the entire + data to be transmitted. + + Args: + target_axon (Union['bittensor.AxonInfo', 'bittensor.axon']): The target Axon to send the request to. + synapse (bittensor.Synapse, optional): The Synapse object encapsulating the data. Defaults to a new bittensor.Synapse instance. + timeout (float, optional): Maximum duration to wait for a response (or a chunk of the response) from the Axon in seconds. Defaults to 12.0. + deserialize (bool, optional): Determines if each received chunk should be deserialized. Defaults to True. + + Yields: + object: Each yielded object contains a chunk of the arbitrary response data from the Axon. + bittensor.Synapse: After the AsyncGenerator has been exhausted, yields the final filled Synapse. 
""" # Record start time @@ -244,9 +481,7 @@ async def call( try: # Log outgoing request - bittensor.logging.debug( - f"dendrite | --> | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | 0 | Success" - ) + self._log_outgoing_request(synapse) # Make the HTTP POST request async with (await self.session).post( @@ -255,16 +490,10 @@ async def call( json=synapse.dict(), timeout=timeout, ) as response: - if ( - response.headers.get("Content-Type", "").lower() - == "text/event-stream".lower() - ): # identify streaming response - await synapse.process_streaming_response( - response - ) # process the entire streaming response - json_response = synapse.extract_response_json(response) - else: - json_response = await response.json() + # Use synapse subclass' process_streaming_response method to yield the response chunks + async for chunk in synapse.process_streaming_response(response): + yield chunk # Yield each chunk as it's processed + json_response = synapse.extract_response_json(response) # Process the server response self.process_server_response(response, json_response, synapse) @@ -272,24 +501,11 @@ async def call( # Set process time and log the response synapse.dendrite.process_time = str(time.time() - start_time) - except aiohttp.ClientConnectorError as e: - synapse.dendrite.status_code = "503" - synapse.dendrite.status_message = f"Service at {synapse.axon.ip}:{str(synapse.axon.port)}/{request_name} unavailable." - - except asyncio.TimeoutError as e: - synapse.dendrite.status_code = "408" - synapse.dendrite.status_message = f"Timedout after {timeout} seconds." 
- except Exception as e: - synapse.dendrite.status_code = "422" - synapse.dendrite.status_message = ( - f"Failed to parse response object with error: {str(e)}" - ) + self._handle_request_errors(synapse, request_name, e) finally: - bittensor.logging.debug( - f"dendrite | <-- | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | {synapse.dendrite.status_code} | {synapse.dendrite.status_message}" - ) + self._log_incoming_response(synapse) # Log synapse event history self.synapse_history.append( @@ -298,9 +514,9 @@ async def call( # Return the updated synapse object after deserializing if requested if deserialize: - return synapse.deserialize() + yield synapse.deserialize() else: - return synapse + yield synapse def preprocess_synapse_for_request( self, @@ -423,3 +639,55 @@ def __repr__(self) -> str: str: The string representation of the Dendrite object in the format "dendrite()". """ return self.__str__() + + async def __aenter__(self): + """ + Asynchronous context manager entry method. + + Enables the use of the `async with` statement with the Dendrite instance. When entering the context, + the current instance of the class is returned, making it accessible within the asynchronous context. + + Returns: + Dendrite: The current instance of the Dendrite class. + + Usage: + async with Dendrite() as dendrite: + await dendrite.some_async_method() + """ + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + """ + Asynchronous context manager exit method. + + Ensures proper cleanup when exiting the `async with` context. This method will close the aiohttp client session + asynchronously, releasing any tied resources. + + Args: + exc_type (Type[BaseException], optional): The type of exception that was raised. + exc_value (BaseException, optional): The instance of exception that was raised. 
+ traceback (TracebackType, optional): A traceback object encapsulating the call stack at the point + where the exception was raised. + + Usage: + async with Dendrite() as dendrite: + await dendrite.some_async_method() + """ + await self.aclose_session() + + def __del__(self): + """ + Dendrite destructor. + + This method is invoked when the Dendrite instance is about to be destroyed. The destructor ensures that the + aiohttp client session is closed before the instance is fully destroyed, releasing any remaining resources. + + Note: Relying on the destructor for cleanup can be unpredictable. It's recommended to explicitly close sessions + using the provided methods or the `async with` context manager. + + Usage: + dendrite = Dendrite() + # ... some operations ... + del dendrite # This will implicitly invoke the __del__ method. + """ + asyncio.run(self.aclose_session()) diff --git a/bittensor/extrinsics/registration.py b/bittensor/extrinsics/registration.py index 9799234363..c759fbcb58 100644 --- a/bittensor/extrinsics/registration.py +++ b/bittensor/extrinsics/registration.py @@ -321,6 +321,14 @@ def burned_register_extrinsic( ) +class MaxSuccessException(Exception): + pass + + +class MaxAttemptsException(Exception): + pass + + def run_faucet_extrinsic( subtensor: "bittensor.subtensor", wallet: "bittensor.wallet", @@ -384,6 +392,7 @@ def run_faucet_extrinsic( # Attempt rolling registration. 
attempts = 1 +    successes = 1 while True: try: pow_result = None @@ -441,6 +450,9 @@ def run_faucet_extrinsic( bittensor.__console__.print( f":cross_mark: [red]Failed[/red]: Error: {response.error_message}" ) + if attempts == max_allowed_attempts: + raise MaxAttemptsException + attempts += 1 # Successful registration else: @@ -450,5 +462,15 @@ def run_faucet_extrinsic( ) old_balance = new_balance + if successes == 3: + raise MaxSuccessException + successes += 1 + except KeyboardInterrupt: return True, "Done" + + except MaxSuccessException: + return True, f"Max successes reached: {3}" + + except MaxAttemptsException: + return False, f"Max attempts reached: {max_allowed_attempts}" diff --git a/bittensor/metagraph.py b/bittensor/metagraph.py index 2c31ccf6b1..789fb3b7a8 100644 --- a/bittensor/metagraph.py +++ b/bittensor/metagraph.py @@ -340,7 +340,6 @@ def sync( block: Optional[int] = None, lite: bool = True, subtensor: Optional["bittensor.subtensor"] = None, - root: bool = False, ) -> "metagraph": """ Initiates the synchronization process of the metagraph. @@ -364,7 +363,7 @@ def sync( # If not a 'lite' version, compute and set weights and bonds for each neuron if not lite: - self._set_weights_and_bonds(root=root, subtensor=subtensor) + self._set_weights_and_bonds(subtensor=subtensor) def _initialize_subtensor(self, subtensor): """ @@ -474,9 +473,7 @@ def _create_tensor(self, data, dtype) -> torch.nn.Parameter: # TODO: Check and test the creation of tensor return torch.nn.Parameter(torch.tensor(data, dtype=dtype), requires_grad=False) - def _set_weights_and_bonds( - self, root: bool = False, subtensor: bittensor.subtensor = None - ): + def _set_weights_and_bonds(self, subtensor: bittensor.subtensor = None): """ Computes and sets weights and bonds for each neuron. @@ -484,7 +481,7 @@ def _set_weights_and_bonds( None. 
""" # TODO: Check and test the computation of weights and bonds - if root: + if self.netuid == 0: self.weights = self._process_root_weights( [neuron.weights for neuron in self.neurons], "weights", subtensor ) diff --git a/bittensor/subtensor.py b/bittensor/subtensor.py index 99b15d5060..9eda2fada1 100644 --- a/bittensor/subtensor.py +++ b/bittensor/subtensor.py @@ -2310,7 +2310,6 @@ def metagraph( netuid: int, lite: bool = True, block: Optional[int] = None, - root: bool = False, ) -> "bittensor.Metagraph": r"""Returns a synced metagraph for the subnet. Args: @@ -2327,7 +2326,7 @@ def metagraph( metagraph_ = bittensor.metagraph( network=self.network, netuid=netuid, lite=lite, sync=False ) - metagraph_.sync(block=block, lite=lite, subtensor=self, root=root) + metagraph_.sync(block=block, lite=lite, subtensor=self) return metagraph_ diff --git a/bittensor/synapse.py b/bittensor/synapse.py index 0cb3a118b9..cb139d6648 100644 --- a/bittensor/synapse.py +++ b/bittensor/synapse.py @@ -410,7 +410,6 @@ def to_headers(self) -> dict: Headers for 'name' and 'timeout' are directly taken from the instance. Further headers are constructed from the properties 'axon' and 'dendrite'. - If the object is a tensor, its shape and data type are added to the headers. For non-optional objects, these are serialized and encoded before adding to the headers. Finally, the function adds the sizes of the headers and the total size to the headers. 
@@ -441,7 +440,7 @@ def to_headers(self) -> dict: property_type_hints = typing.get_type_hints(self) # Getting the fields of the instance - instance_fields = self.__dict__ + instance_fields = self.dict() # Iterating over the fields of the instance for field, value in instance_fields.items(): @@ -454,28 +453,6 @@ def to_headers(self) -> dict: if field in headers or value is None: continue - # Adding the tensor shape and data type to the headers if the object is a tensor - if isinstance(value, bittensor.Tensor): - headers[f"bt_header_tensor_{field}"] = f"{value.shape}-{value.dtype}" - - elif isinstance(value, list) and all( - isinstance(elem, bittensor.Tensor) for elem in value - ): - serialized_list_tensor = [] - for i, tensor in enumerate(value): - serialized_list_tensor.append(f"{tensor.shape}-{tensor.dtype}") - headers[f"bt_header_list_tensor_{field}"] = str(serialized_list_tensor) - - elif isinstance(value, dict) and all( - isinstance(elem, bittensor.Tensor) for elem in value.values() - ): - serialized_dict_tensor = [] - for key, tensor in value.items(): - serialized_dict_tensor.append( - f"{key}-{tensor.shape}-{tensor.dtype}" - ) - headers[f"bt_header_dict_tensor_{field}"] = str(serialized_dict_tensor) - elif required and field in required: try: # create an empty (dummy) instance of type(value) to pass pydantic validation on the axon side @@ -563,50 +540,6 @@ def parse_headers_to_inputs(cls, headers: dict) -> dict: f"Error while parsing 'dendrite' header {key}: {e}" ) continue - # Handle 'tensor' headers - elif "bt_header_tensor_" in key: - try: - new_key = key.split("bt_header_tensor_")[1] - shape, dtype = value.split("-") - # TODO: Verify if the shape and dtype values need to be converted before being used - inputs_dict[new_key] = bittensor.Tensor(shape=shape, dtype=dtype) - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'tensor' header {key}: {e}" - ) - continue - elif "bt_header_list_tensor_" in key: - try: - new_key = 
key.split("bt_header_list_tensor_")[1] - deserialized_tensors = [] - stensors = ast.literal_eval(value) - for value in stensors: - shape, dtype = value.split("-") - deserialized_tensors.append( - bittensor.Tensor(shape=shape, dtype=dtype) - ) - inputs_dict[new_key] = deserialized_tensors - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'tensor' header {key}: {e}" - ) - continue - elif "bt_header_dict_tensor_" in key: - try: - new_key = key.split("bt_header_dict_tensor_")[1] - deserialized_dict_tensors = {} - stensors = ast.literal_eval(value) - for value in stensors: - key, shape, dtype = value.split("-") - deserialized_dict_tensors[key] = bittensor.Tensor( - shape=shape, dtype=dtype - ) - inputs_dict[new_key] = deserialized_dict_tensors - except Exception as e: - bittensor.logging.error( - f"Error while parsing 'tensor' header {key}: {e}" - ) - continue # Handle 'input_obj' headers elif "bt_header_input_obj" in key: try: diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py index 6324461965..dea833d72b 100644 --- a/bittensor/utils/__init__.py +++ b/bittensor/utils/__init__.py @@ -26,7 +26,7 @@ from substrateinterface.utils import ss58 as ss58 from .wallet_utils import * -from .registration import create_pow as create_pow, __reregister_wallet as reregister +from .registration import create_pow as create_pow RAOPERTAO = 1e9 U16_MAX = 65535 diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py index d86f7d85fc..29a1d5d7f3 100644 --- a/bittensor/utils/registration.py +++ b/bittensor/utils/registration.py @@ -1087,51 +1087,3 @@ def create_pow( ) return solution - - -def __reregister_wallet( - netuid: int, - wallet: "bittensor.wallet", - subtensor: "bittensor.subtensor", - reregister: bool = False, - prompt: bool = False, - **registration_args: Any, -) -> Optional["bittensor.wallet"]: - """Re-register this a wallet on the chain, or exits. 
- Exits if the wallet is not registered on the chain AND - reregister is set to False. - Args: - netuid (int): - The network uid of the subnet to register on. - wallet( 'bittensor.wallet' ): - Bittensor wallet to re-register - reregister (bool, default=False): - If true, re-registers the wallet on the chain. - Exits if False and the wallet is not registered on the chain. - prompt (bool): - If true, the call waits for confirmation from the user before proceeding. - **registration_args (Any): - The registration arguments to pass to the subtensor register function. - Return: - wallet (bittensor.wallet): - The wallet - - Raises: - SytemExit(0): - If the wallet is not registered on the chain AND - the config.subtensor.reregister flag is set to False. - """ - wallet.hotkey - - if not subtensor.is_hotkey_registered_on_subnet( - hotkey_ss58=wallet.hotkey.ss58_address, netuid=netuid - ): - # Check if the wallet should reregister - if not reregister: - sys.exit(0) - - subtensor.register( - wallet=wallet, netuid=netuid, prompt=prompt, **registration_args - ) - - return wallet diff --git a/bittensor/utils/registratrion_old.py b/bittensor/utils/registratrion_old.py deleted file mode 100644 index 453772d649..0000000000 --- a/bittensor/utils/registratrion_old.py +++ /dev/null @@ -1,1030 +0,0 @@ -import binascii -import hashlib -import math -import multiprocessing -import os -import random -import time -from dataclasses import dataclass -from datetime import timedelta -from queue import Empty, Full -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import backoff -import bittensor -import torch -from Crypto.Hash import keccak -from rich import console as rich_console -from rich import status as rich_status - -from ._register_cuda import solve_cuda - - -class CUDAException(Exception): - """An exception raised when an error occurs in the CUDA environment.""" - - pass - - -def hex_bytes_to_u8_list(hex_bytes: bytes): - hex_chunks = [int(hex_bytes[i : i + 2], 16) 
for i in range(0, len(hex_bytes), 2)] - return hex_chunks - - -def u8_list_to_hex(values: list): - total = 0 - for val in reversed(values): - total = (total << 8) + val - return total - - -def create_seal_hash(block_hash: bytes, nonce: int) -> bytes: - block_bytes = block_hash.encode("utf-8")[2:] - nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) - pre_seal = nonce_bytes + block_bytes - seal_sh256 = hashlib.sha256(bytearray(hex_bytes_to_u8_list(pre_seal))).digest() - kec = keccak.new(digest_bits=256) - seal = kec.update(seal_sh256).digest() - return seal - - -def seal_meets_difficulty(seal: bytes, difficulty: int): - seal_number = int.from_bytes(seal, "big") - product = seal_number * difficulty - limit = int(math.pow(2, 256)) - 1 - if product > limit: - return False - else: - return True - - -def solve_for_difficulty(block_hash, difficulty): - meets = False - nonce = -1 - while not meets: - nonce += 1 - seal = create_seal_hash(block_hash, nonce) - meets = seal_meets_difficulty(seal, difficulty) - if nonce > 1: - break - return nonce, seal - - -def get_human_readable(num, suffix="H"): - for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: - if abs(num) < 1000.0: - return f"{num:3.1f}{unit}{suffix}" - num /= 1000.0 - return f"{num:.1f}Y{suffix}" - - -def millify(n: int): - millnames = ["", " K", " M", " B", " T"] - n = float(n) - millidx = max( - 0, - min( - len(millnames) - 1, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)) - ), - ) - - return "{:.2f}{}".format(n / 10 ** (3 * millidx), millnames[millidx]) - - -def POWNotStale(subtensor: "bittensor.subtensor", pow_result: Dict) -> bool: - """Returns True if the POW is not stale. - This means the block the POW is solved for is within 3 blocks of the current block. 
- """ - return pow_result["block_number"] >= subtensor.get_current_block() - 3 - - -@dataclass -class POWSolution: - """A solution to the registration PoW problem.""" - - nonce: int - block_number: int - difficulty: int - seal: bytes - - -class SolverBase(multiprocessing.Process): - """ - A process that solves the registration PoW problem. - Args: - proc_num: int - The number of the process being created. - num_proc: int - The total number of processes running. - update_interval: int - The number of nonces to try to solve before checking for a new block. - finished_queue: multiprocessing.Queue - The queue to put the process number when a process finishes each update_interval. - Used for calculating the average time per update_interval across all processes. - solution_queue: multiprocessing.Queue - The queue to put the solution the process has found during the pow solve. - newBlockEvent: multiprocessing.Event - The event to set by the main process when a new block is finalized in the network. - The solver process will check for the event after each update_interval. - The solver process will get the new block hash and difficulty and start solving for a new nonce. - stopEvent: multiprocessing.Event - The event to set by the main process when all the solver processes should stop. - The solver process will check for the event after each update_interval. - The solver process will stop when the event is set. - Used to stop the solver processes when a solution is found. - curr_block: multiprocessing.Array - The array containing this process's current block hash. - The main process will set the array to the new block hash when a new block is finalized in the network. - The solver process will get the new block hash from this array when newBlockEvent is set. - curr_block_num: multiprocessing.Value - The value containing this process's current block number. - The main process will set the value to the new block number when a new block is finalized in the network. 
- The solver process will get the new block number from this value when newBlockEvent is set. - curr_diff: multiprocessing.Array - The array containing this process's current difficulty. - The main process will set the array to the new difficulty when a new block is finalized in the network. - The solver process will get the new difficulty from this array when newBlockEvent is set. - check_block: multiprocessing.Lock - The lock to prevent this process from getting the new block data while the main process is updating the data. - limit: int - The limit of the pow solve for a valid solution. - """ - - proc_num: int - num_proc: int - update_interval: int - finished_queue: multiprocessing.Queue - solution_queue: multiprocessing.Queue - newBlockEvent: multiprocessing.Event - stopEvent: multiprocessing.Event - curr_block: multiprocessing.Array - curr_block_num: multiprocessing.Value - curr_diff: multiprocessing.Array - check_block: multiprocessing.Lock - limit: int - - def __init__( - self, - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ): - multiprocessing.Process.__init__(self, daemon=True) - self.proc_num = proc_num - self.num_proc = num_proc - self.update_interval = update_interval - self.finished_queue = finished_queue - self.solution_queue = solution_queue - self.newBlockEvent = multiprocessing.Event() - self.newBlockEvent.clear() - self.curr_block = curr_block - self.curr_block_num = curr_block_num - self.curr_diff = curr_diff - self.check_block = check_block - self.stopEvent = stopEvent - self.limit = limit - - def run(self): - raise NotImplementedError("SolverBase is an abstract class") - - -class Solver(SolverBase): - def run(self): - block_number: int - block_bytes: bytes - block_difficulty: int - nonce_limit = int(math.pow(2, 64)) - 1 - - # Start at random nonce - nonce_start = random.randint(0, nonce_limit) - nonce_end = nonce_start + 
self.update_interval - while not self.stopEvent.is_set(): - if self.newBlockEvent.is_set(): - with self.check_block: - block_number = self.curr_block_num.value - block_bytes = bytes(self.curr_block) - block_difficulty = registration_diff_unpack(self.curr_diff) - - self.newBlockEvent.clear() - - # Do a block of nonces - solution = solve_for_nonce_block( - self, - nonce_start, - nonce_end, - block_bytes, - block_difficulty, - self.limit, - block_number, - ) - if solution is not None: - self.solution_queue.put(solution) - - try: - # Send time - self.finished_queue.put_nowait(self.proc_num) - except Full: - pass - - nonce_start = random.randint(0, nonce_limit) - nonce_start = nonce_start % nonce_limit - nonce_end = nonce_start + self.update_interval - - -class CUDASolver(SolverBase): - dev_id: int - TPB: int - - def __init__( - self, - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - dev_id: int, - TPB: int, - ): - super().__init__( - proc_num, - num_proc, - update_interval, - finished_queue, - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ) - self.dev_id = dev_id - self.TPB = TPB - - def run(self): - block_number: int = 0 # dummy value - block_bytes: bytes = b"0" * 32 # dummy value - block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value - nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX - - # Start at random nonce - nonce_start = random.randint(0, nonce_limit) - while not self.stopEvent.is_set(): - if self.newBlockEvent.is_set(): - with self.check_block: - block_number = self.curr_block_num.value - block_bytes = bytes(self.curr_block) - block_difficulty = registration_diff_unpack(self.curr_diff) - - self.newBlockEvent.clear() - - # Do a block of nonces - solution = solve_for_nonce_block_cuda( - self, - nonce_start, - self.update_interval, - block_bytes, - block_difficulty, - self.limit, - 
block_number, - self.dev_id, - self.TPB, - ) - if solution is not None: - self.solution_queue.put(solution) - - try: - # Signal that a nonce_block was finished using queue - # send our proc_num - self.finished_queue.put(self.proc_num) - except Full: - pass - - # increase nonce by number of nonces processed - nonce_start += self.update_interval * self.TPB - nonce_start = nonce_start % nonce_limit - - -def solve_for_nonce_block_cuda( - solver: CUDASolver, - nonce_start: int, - update_interval: int, - block_bytes: bytes, - difficulty: int, - limit: int, - block_number: int, - dev_id: int, - TPB: int, -) -> Optional[POWSolution]: - """Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * TPB""" - solution, seal = solve_cuda( - nonce_start, update_interval, TPB, block_bytes, difficulty, limit, dev_id - ) - - if solution != -1: - # Check if solution is valid (i.e. not -1) - return POWSolution(solution, block_number, difficulty, seal) - - return None - - -def solve_for_nonce_block( - solver: Solver, - nonce_start: int, - nonce_end: int, - block_bytes: bytes, - difficulty: int, - limit: int, - block_number: int, -) -> Optional[POWSolution]: - """Tries to solve the POW for a block of nonces (nonce_start, nonce_end)""" - for nonce in range(nonce_start, nonce_end): - # Create seal. - nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) - pre_seal = nonce_bytes + block_bytes - seal_sh256 = hashlib.sha256(bytearray(hex_bytes_to_u8_list(pre_seal))).digest() - kec = keccak.new(digest_bits=256) - seal = kec.update(seal_sh256).digest() - seal_number = int.from_bytes(seal, "big") - - # Check if seal meets difficulty - product = seal_number * difficulty - if product < limit: - # Found a solution, save it. - return POWSolution(nonce, block_number, difficulty, seal) - - return None - - -def registration_diff_unpack(packed_diff: multiprocessing.Array) -> int: - """Unpacks the packed two 32-bit integers into one 64-bit integer. 
Little endian.""" - return int(packed_diff[0] << 32 | packed_diff[1]) - - -def registration_diff_pack(diff: int, packed_diff: multiprocessing.Array): - """Packs the difficulty into two 32-bit integers. Little endian.""" - packed_diff[0] = diff >> 32 - packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits - - -def update_curr_block( - curr_diff: multiprocessing.Array, - curr_block: multiprocessing.Array, - curr_block_num: multiprocessing.Value, - block_number: int, - block_bytes: bytes, - diff: int, - lock: multiprocessing.Lock, -): - with lock: - curr_block_num.value = block_number - for i in range(64): - curr_block[i] = block_bytes[i] - registration_diff_pack(diff, curr_diff) - - -def get_cpu_count(): - try: - return len(os.sched_getaffinity(0)) - except AttributeError: - # OSX does not have sched_getaffinity - return os.cpu_count() - - -@dataclass -class RegistrationStatistics: - """Statistics for a registration.""" - - time_spent_total: float - rounds_total: int - time_average: float - time_spent: float - hash_rate_perpetual: float - hash_rate: float - difficulty: int - block_number: int - block_hash: bytes - - -class RegistrationStatisticsLogger: - """Logs statistics for a registration.""" - - console: rich_console.Console - status: Optional[rich_status.Status] - - def __init__( - self, console: rich_console.Console, output_in_place: bool = True - ) -> None: - self.console = console - - if output_in_place: - self.status = self.console.status("Solving") - else: - self.status = None - - def start(self) -> None: - if self.status is not None: - self.status.start() - - def stop(self) -> None: - if self.status is not None: - self.status.stop() - - def get_status_message( - cls, stats: RegistrationStatistics, verbose: bool = False - ) -> str: - message = ( - "Solving\n" - + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n" - + ( - f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n" - + f"Time Spent Average: 
{timedelta(seconds=stats.time_average)}\n" - if verbose - else "" - ) - + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n" - + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / " - + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n" - + f"Block Number: [bold white]{stats.block_number}[/bold white]\n" - + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n" - ) - return message - - def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None: - if self.status is not None: - self.status.update(self.get_status_message(stats, verbose=verbose)) - else: - self.console.log(self.get_status_message(stats, verbose=verbose)) - - -def solve_for_difficulty_fast( - subtensor: "bittensor.subtensor", - wallet, - output_in_place: bool = True, - num_processes: Optional[int] = None, - update_interval: Optional[int] = None, - n_samples: int = 10, - alpha_: float = 0.80, - log_verbose: bool = False, -) -> Optional[POWSolution]: - """ - Solves the POW for registration using multiprocessing. - Args: - subtensor - Subtensor to connect to for block information and to submit. - wallet: - Wallet to use for registration. - output_in_place: bool - If true, prints the status in place. Otherwise, prints the status on a new line. - num_processes: int - Number of processes to use. - update_interval: int - Number of nonces to solve before updating block information. - n_samples: int - The number of samples of the hash_rate to keep for the EWMA - alpha_: float - The alpha for the EWMA for the hash_rate calculation - log_verbose: bool - If true, prints more verbose logging of the registration metrics. - Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. 
- Note: - - We can also modify the update interval to do smaller blocks of work, - while still updating the block information after a different number of nonces, - to increase the transparency of the process while still keeping the speed. - """ - if num_processes == None: - # get the number of allowed processes for this process - num_processes = min(1, get_cpu_count()) - - if update_interval is None: - update_interval = 50_000 - - limit = int(math.pow(2, 256)) - 1 - - curr_block = multiprocessing.Array("h", 64, lock=True) # byte array - curr_block_num = multiprocessing.Value("i", 0, lock=True) # int - curr_diff = multiprocessing.Array("Q", [0, 0], lock=True) # [high, low] - - # Establish communication queues - ## See the Solver class for more information on the queues. - stopEvent = multiprocessing.Event() - stopEvent.clear() - - solution_queue = multiprocessing.Queue() - finished_queues = [multiprocessing.Queue() for _ in range(num_processes)] - check_block = multiprocessing.Lock() - - # Start consumers - solvers = [ - Solver( - i, - num_processes, - update_interval, - finished_queues[i], - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - ) - for i in range(num_processes) - ] - - # Get first block - block_number = subtensor.get_current_block() - difficulty = subtensor.difficulty - block_hash = subtensor.substrate.get_block_hash(block_number) - while block_hash == None: - block_hash = subtensor.substrate.get_block_hash(block_number) - block_bytes = block_hash.encode("utf-8")[2:] - old_block_number = block_number - # Set to current block - update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - check_block, - ) - - # Set new block events for each solver to start at the initial block - for worker in solvers: - worker.newBlockEvent.set() - - for worker in solvers: - worker.start() # start the solver processes - - start_time = time.time() # time that the 
registration started - time_last = start_time # time that the last work blocks completed - - curr_stats = RegistrationStatistics( - time_spent_total=0.0, - time_average=0.0, - rounds_total=0, - time_spent=0.0, - hash_rate_perpetual=0.0, - hash_rate=0.0, - difficulty=difficulty, - block_number=block_number, - block_hash=block_hash, - ) - - start_time_perpetual = time.time() - - console = bittensor.__console__ - logger = RegistrationStatisticsLogger(console, output_in_place) - logger.start() - - solution = None - - hash_rates = [0] * n_samples # The last n true hash_rates - weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha - - while not subtensor.is_hotkey_registered(hotkey_ss58=wallet.hotkey.ss58_address): - # Wait until a solver finds a solution - try: - solution = solution_queue.get(block=True, timeout=0.25) - if solution is not None: - break - except Empty: - # No solution found, try again - pass - - # check for new block - old_block_number = check_for_newest_block_and_update( - subtensor=subtensor, - old_block_number=old_block_number, - curr_diff=curr_diff, - curr_block=curr_block, - curr_block_num=curr_block_num, - curr_stats=curr_stats, - update_curr_block=update_curr_block, - check_block=check_block, - solvers=solvers, - ) - - num_time = 0 - for finished_queue in finished_queues: - try: - proc_num = finished_queue.get(timeout=0.1) - num_time += 1 - - except Empty: - continue - - time_now = time.time() # get current time - time_since_last = time_now - time_last # get time since last work block(s) - if num_time > 0 and time_since_last > 0.0: - # create EWMA of the hash_rate to make measure more robust - - hash_rate_ = (num_time * update_interval) / time_since_last - hash_rates.append(hash_rate_) - hash_rates.pop(0) # remove the 0th data point - curr_stats.hash_rate = sum( - [hash_rates[i] * weights[i] for i in range(n_samples)] - ) / (sum(weights)) - - # update time last to now - time_last = time_now - - curr_stats.time_average = ( - 
curr_stats.time_average * curr_stats.rounds_total - + curr_stats.time_spent - ) / (curr_stats.rounds_total + num_time) - curr_stats.rounds_total += num_time - - # Update stats - curr_stats.time_spent = time_since_last - new_time_spent_total = time_now - start_time_perpetual - curr_stats.hash_rate_perpetual = ( - curr_stats.rounds_total * update_interval - ) / new_time_spent_total - curr_stats.time_spent_total = new_time_spent_total - - # Update the logger - logger.update(curr_stats, verbose=log_verbose) - - # exited while, solution contains the nonce or wallet is registered - stopEvent.set() # stop all other processes - logger.stop() - - # terminate and wait for all solvers to exit - terminate_workers_and_wait_for_exit(solvers) - - return solution - - -@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3) -def get_block_with_retry(subtensor: "bittensor.subtensor") -> Tuple[int, int, bytes]: - block_number = subtensor.get_current_block() - difficulty = subtensor.difficulty - block_hash = subtensor.substrate.get_block_hash(block_number) - if block_hash is None: - raise Exception( - "Network error. 
Could not connect to substrate to get block hash" - ) - return block_number, difficulty, block_hash - - -class UsingSpawnStartMethod: - def __init__(self, force: bool = False): - self._old_start_method = None - self._force = force - - def __enter__(self): - self._old_start_method = multiprocessing.get_start_method(allow_none=True) - if self._old_start_method == None: - self._old_start_method = "spawn" # default to spawn - - multiprocessing.set_start_method("spawn", force=self._force) - - def __exit__(self, *args): - # restore the old start method - multiprocessing.set_start_method(self._old_start_method, force=True) - - -def check_for_newest_block_and_update( - subtensor: "bittensor.subtensor", - old_block_number: int, - curr_diff: multiprocessing.Array, - curr_block: multiprocessing.Array, - curr_block_num: multiprocessing.Value, - update_curr_block: Callable, - check_block: "multiprocessing.Lock", - solvers: List[Solver], - curr_stats: RegistrationStatistics, -) -> int: - """ - Checks for a new block and updates the current block information if a new block is found. - Args: - subtensor (:obj:`bittensor.subtensor`, `required`): - The subtensor object to use for getting the current block. - old_block_number (:obj:`int`, `required`): - The old block number to check against. - curr_diff (:obj:`multiprocessing.Array`, `required`): - The current difficulty as a multiprocessing array. - curr_block (:obj:`multiprocessing.Array`, `required`): - Where the current block is stored as a multiprocessing array. - curr_block_num (:obj:`multiprocessing.Value`, `required`): - Where the current block number is stored as a multiprocessing value. - update_curr_block (:obj:`Callable`, `required`): - A function that updates the current block. - check_block (:obj:`multiprocessing.Lock`, `required`): - A mp lock that is used to check for a new block. - solvers (:obj:`List[Solver]`, `required`): - A list of solvers to update the current block for. 
- curr_stats (:obj:`RegistrationStatistics`, `required`): - The current registration statistics to update. - Returns: - (int) The current block number. - """ - block_number = subtensor.get_current_block() - if block_number != old_block_number: - old_block_number = block_number - # update block information - block_hash = subtensor.substrate.get_block_hash(block_number) - while block_hash == None: - block_hash = subtensor.substrate.get_block_hash(block_number) - block_bytes = block_hash.encode("utf-8")[2:] - difficulty = subtensor.difficulty - - update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - check_block, - ) - # Set new block events for each solver - - for worker in solvers: - worker.newBlockEvent.set() - - # update stats - curr_stats.block_number = block_number - curr_stats.block_hash = block_hash - curr_stats.difficulty = difficulty - - return old_block_number - - -def solve_for_difficulty_fast_cuda( - subtensor: "bittensor.subtensor", - wallet: "bittensor.wallet", - output_in_place: bool = True, - update_interval: int = 50_000, - TPB: int = 512, - dev_id: Union[List[int], int] = 0, - n_samples: int = 10, - alpha_: float = 0.80, - log_verbose: bool = False, -) -> Optional[POWSolution]: - """ - Solves the registration fast using CUDA - Args: - subtensor: bittensor.subtensor - The subtensor node to grab blocks - wallet: bittensor.wallet - The wallet to register - output_in_place: bool - If true, prints the output in place, otherwise prints to new lines - update_interval: int - The number of nonces to try before checking for more blocks - TPB: int - The number of threads per block. 
CUDA param that should match the GPU capability - dev_id: Union[List[int], int] - The CUDA device IDs to execute the registration on, either a single device or a list of devices - n_samples: int - The number of samples of the hash_rate to keep for the EWMA - alpha_: float - The alpha for the EWMA for the hash_rate calculation - log_verbose: bool - If true, prints more verbose logging of the registration metrics. - Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. - """ - if isinstance(dev_id, int): - dev_id = [dev_id] - elif dev_id is None: - dev_id = [0] - - if update_interval is None: - update_interval = 50_000 - - if not torch.cuda.is_available(): - raise Exception("CUDA not available") - - limit = int(math.pow(2, 256)) - 1 - - # Set mp start to use spawn so CUDA doesn't complain - with UsingSpawnStartMethod(force=True): - curr_block = multiprocessing.Array("h", 64, lock=True) # byte array - curr_block_num = multiprocessing.Value("i", 0, lock=True) # int - curr_diff = multiprocessing.Array("Q", [0, 0], lock=True) # [high, low] - - ## Create a worker per CUDA device - num_processes = len(dev_id) - - # Establish communication queues - stopEvent = multiprocessing.Event() - stopEvent.clear() - solution_queue = multiprocessing.Queue() - finished_queues = [multiprocessing.Queue() for _ in range(num_processes)] - check_block = multiprocessing.Lock() - - # Start workers - solvers = [ - CUDASolver( - i, - num_processes, - update_interval, - finished_queues[i], - solution_queue, - stopEvent, - curr_block, - curr_block_num, - curr_diff, - check_block, - limit, - dev_id[i], - TPB, - ) - for i in range(num_processes) - ] - - # Get first block - block_number = subtensor.get_current_block() - difficulty = subtensor.difficulty - block_hash = subtensor.substrate.get_block_hash(block_number) - while block_hash == None: - block_hash = subtensor.substrate.get_block_hash(block_number) - block_bytes = 
block_hash.encode("utf-8")[2:] - old_block_number = block_number - - # Set to current block - update_curr_block( - curr_diff, - curr_block, - curr_block_num, - block_number, - block_bytes, - difficulty, - check_block, - ) - - # Set new block events for each solver to start at the initial block - for worker in solvers: - worker.newBlockEvent.set() - - for worker in solvers: - worker.start() # start the solver processes - - start_time = time.time() # time that the registration started - time_last = start_time # time that the last work blocks completed - - curr_stats = RegistrationStatistics( - time_spent_total=0.0, - time_average=0.0, - rounds_total=0, - time_spent=0.0, - hash_rate_perpetual=0.0, - hash_rate=0.0, # EWMA hash_rate (H/s) - difficulty=difficulty, - block_number=block_number, - block_hash=block_hash, - ) - - start_time_perpetual = time.time() - - console = bittensor.__console__ - logger = RegistrationStatisticsLogger(console, output_in_place) - logger.start() - - hash_rates = [0] * n_samples # The last n true hash_rates - weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha - - solution = None - while not subtensor.is_hotkey_registered( - hotkey_ss58=wallet.hotkey.ss58_address - ): - # Wait until a solver finds a solution - try: - solution = solution_queue.get(block=True, timeout=0.15) - if solution is not None: - break - except Empty: - # No solution found, try again - pass - - # check for new block - old_block_number = check_for_newest_block_and_update( - subtensor=subtensor, - curr_diff=curr_diff, - curr_block=curr_block, - curr_block_num=curr_block_num, - old_block_number=old_block_number, - curr_stats=curr_stats, - update_curr_block=update_curr_block, - check_block=check_block, - solvers=solvers, - ) - - num_time = 0 - # Get times for each solver - for finished_queue in finished_queues: - try: - proc_num = finished_queue.get(timeout=0.1) - num_time += 1 - - except Empty: - continue - - time_now = time.time() # get current time - 
time_since_last = time_now - time_last # get time since last work block(s) - if num_time > 0 and time_since_last > 0.0: - # create EWMA of the hash_rate to make measure more robust - - hash_rate_ = (num_time * TPB * update_interval) / time_since_last - hash_rates.append(hash_rate_) - hash_rates.pop(0) # remove the 0th data point - curr_stats.hash_rate = sum( - [hash_rates[i] * weights[i] for i in range(n_samples)] - ) / (sum(weights)) - - # update time last to now - time_last = time_now - - curr_stats.time_average = ( - curr_stats.time_average * curr_stats.rounds_total - + curr_stats.time_spent - ) / (curr_stats.rounds_total + num_time) - curr_stats.rounds_total += num_time - - # Update stats - curr_stats.time_spent = time_since_last - new_time_spent_total = time_now - start_time_perpetual - curr_stats.hash_rate_perpetual = ( - curr_stats.rounds_total * (TPB * update_interval) - ) / new_time_spent_total - curr_stats.time_spent_total = new_time_spent_total - - # Update the logger - logger.update(curr_stats, verbose=log_verbose) - - # exited while, found_solution contains the nonce or wallet is registered - - stopEvent.set() # stop all other processes - logger.stop() - - # terminate and wait for all solvers to exit - terminate_workers_and_wait_for_exit(solvers) - - return solution - - -def terminate_workers_and_wait_for_exit(workers: List[multiprocessing.Process]) -> None: - for worker in workers: - worker.terminate() - worker.join() - - -def create_pow( - subtensor, - wallet, - output_in_place: bool = True, - cuda: bool = False, - dev_id: Union[List[int], int] = 0, - tpb: int = 256, - num_processes: int = None, - update_interval: int = None, - log_verbose: bool = False, -) -> Optional[Dict[str, Any]]: - if cuda: - solution: POWSolution = solve_for_difficulty_fast_cuda( - subtensor, - wallet, - output_in_place=output_in_place, - dev_id=dev_id, - TPB=tpb, - update_interval=update_interval, - log_verbose=log_verbose, - ) - else: - solution: POWSolution = 
solve_for_difficulty_fast( - subtensor, - wallet, - output_in_place=output_in_place, - num_processes=num_processes, - update_interval=update_interval, - log_verbose=log_verbose, - ) - - return ( - None - if solution is None - else { - "nonce": solution.nonce, - "difficulty": solution.difficulty, - "block_number": solution.block_number, - "work": binascii.hexlify(solution.seal), - } - ) diff --git a/tests/integration_tests/test_cli.py b/tests/integration_tests/test_cli.py index ffc973d78c..4dfb4caba9 100644 --- a/tests/integration_tests/test_cli.py +++ b/tests/integration_tests/test_cli.py @@ -1913,31 +1913,6 @@ def test_register(self, _): config = self.config config.command = "subnets" config.subcommand = "register" - config.register.num_processes = 1 - config.register.update_interval = 50_000 - config.no_prompt = True - - mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - with patch("bittensor.wallet", return_value=mock_wallet) as mock_create_wallet: - with patch( - "bittensor.extrinsics.registration.POWSolution.is_stale", - side_effect=MockException, - ) as mock_is_stale: - with pytest.raises(MockException): - cli = bittensor.cli(config) - cli.run() - mock_create_wallet.assert_called_once() - - self.assertEqual(mock_is_stale.call_count, 1) - - def test_recycle_register(self, _): - config = self.config - config.command = "subnets" - config.subcommand = "recycle_register" config.no_prompt = True mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) @@ -1961,6 +1936,31 @@ def test_recycle_register(self, _): self.assertTrue(registered) + def test_pow_register(self, _): + config = self.config + config.command = "subnets" + config.subcommand = "pow_register" + config.pow_register.num_processes = 1 + config.pow_register.update_interval = 50_000 + config.no_prompt = True + + mock_wallet = generate_wallet(hotkey=_get_mock_keypair(100, self.id())) + + class MockException(Exception): + 
pass + + with patch("bittensor.wallet", return_value=mock_wallet) as mock_create_wallet: + with patch( + "bittensor.extrinsics.registration.POWSolution.is_stale", + side_effect=MockException, + ) as mock_is_stale: + with pytest.raises(MockException): + cli = bittensor.cli(config) + cli.run() + mock_create_wallet.assert_called_once() + + self.assertEqual(mock_is_stale.call_count, 1) + + def test_stake(self, _): amount_to_stake: Balance = Balance.from_tao(0.5) config = self.config diff --git a/tests/integration_tests/test_cli_no_network.py b/tests/integration_tests/test_cli_no_network.py index 5aa8571211..8fc551cde3 100644 --- a/tests/integration_tests/test_cli_no_network.py +++ b/tests/integration_tests/test_cli_no_network.py @@ -338,7 +338,7 @@ def test_btcli_help(self, _, __): def test_register_cuda_use_cuda_flag(self, _, __, patched_sub): base_args = [ "subnets", - "register", + "pow_register", "--wallet.path", "tmp/walletpath", "--wallet.name", @@ -358,24 +358,24 @@ def test_register_cuda_use_cuda_flag(self, _, __, patched_sub): # Should be able to set true without argument args = base_args + [ - "--register.cuda.use_cuda", # should be True without any arugment + "--pow_register.cuda.use_cuda", # should be True without any argument ] with pytest.raises(MockException): cli = bittensor.cli(args=args) cli.run() - self.assertEqual(cli.config.register.cuda.get("use_cuda"), True) + self.assertEqual(cli.config.pow_register.cuda.get("use_cuda"), True) # Should be able to set to false with no argument args = base_args + [ - "--register.cuda.no_cuda", + "--pow_register.cuda.no_cuda", ] with pytest.raises(MockException): cli = bittensor.cli(args=args) cli.run() - self.assertEqual(cli.config.register.cuda.get("use_cuda"), False) + self.assertEqual(cli.config.pow_register.cuda.get("use_cuda"), False) def return_mock_sub_2(*args, **kwargs): diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py index 
8448f9098b..e0c633e3b2 100644 --- a/tests/integration_tests/test_subtensor_integration.py +++ b/tests/integration_tests/test_subtensor_integration.py @@ -586,12 +586,11 @@ class ExitEarly(Exception): msg="only tries to submit once, then exits", ) + def test_defaults_to_finney(self): + sub = bittensor.subtensor() + assert sub.network == "finney" + assert sub.chain_endpoint == bittensor.__finney_entrypoint__ -# # This test was flaking, please check to_defaults before reactiving the test -# def _test_defaults_to_finney(): -# sub = bittensor.subtensor() -# assert sub.network == 'finney' -# assert sub.chain_endpoint == bittensor.__finney_entrypoint__ if __name__ == "__main__": unittest.main() diff --git a/tests/unit_tests/test_dendrite.py b/tests/unit_tests/test_dendrite.py index d61a9faf79..299fcbc424 100644 --- a/tests/unit_tests/test_dendrite.py +++ b/tests/unit_tests/test_dendrite.py @@ -18,11 +18,22 @@ # DEALINGS IN THE SOFTWARE. import pytest +import typing import bittensor from unittest.mock import MagicMock, Mock, patch from tests.helpers import _get_mock_wallet +class SynapseDummy(bittensor.Synapse): + input: int + output: typing.Optional[int] = None + + +def dummy(synapse: SynapseDummy) -> SynapseDummy: + synapse.output = synapse.input + 1 + return synapse + + @pytest.fixture def setup_dendrite(): user_wallet = ( @@ -32,6 +43,15 @@ def setup_dendrite(): return dendrite_obj +@pytest.fixture(scope="session") +def setup_axon(): + axon = bittensor.axon() + axon.attach(forward_fn=dummy) + axon.start() + yield axon + del axon + + def test_init(setup_dendrite): dendrite_obj = setup_dendrite assert isinstance(dendrite_obj, bittensor.dendrite) @@ -50,6 +70,26 @@ def test_repr(setup_dendrite): assert repr(dendrite_obj) == expected_string +def test_close(setup_dendrite, setup_axon): + axon = setup_axon + dendrite_obj = setup_dendrite + # Query the axon to open a session + dendrite_obj.query(axon, SynapseDummy(input=1)) + # Session should be automatically closed after 
query + assert dendrite_obj._session == None + + +@pytest.mark.asyncio +async def test_aclose(setup_dendrite, setup_axon): + axon = setup_axon + dendrite_obj = setup_dendrite + # Use context manager to open an async session + async with dendrite_obj: + resp = await dendrite_obj([axon], SynapseDummy(input=1), deserialize=False) + # Close should automatically be called on the session after context manager scope + assert dendrite_obj._session == None + + class AsyncMock(Mock): def __call__(self, *args, **kwargs): sup = super(AsyncMock, self) diff --git a/tests/unit_tests/test_synapse.py b/tests/unit_tests/test_synapse.py index c2900511e1..70dd88db76 100644 --- a/tests/unit_tests/test_synapse.py +++ b/tests/unit_tests/test_synapse.py @@ -25,7 +25,6 @@ def test_parse_headers_to_inputs(): class Test(bittensor.Synapse): key1: typing.List[int] - key2: bittensor.Tensor # Define a mock headers dictionary to use for testing headers = { @@ -34,7 +33,6 @@ class Test(bittensor.Synapse): "bt_header_input_obj_key1": base64.b64encode( json.dumps([1, 2, 3, 4]).encode("utf-8") ).decode("utf-8"), - "bt_header_tensor_key2": "[3]-torch.float32", "timeout": "12", "name": "Test", "header_size": "111", @@ -51,7 +49,6 @@ class Test(bittensor.Synapse): "axon": {"nonce": "111"}, "dendrite": {"ip": "12.1.1.2"}, "key1": [1, 2, 3, 4], - "key2": bittensor.Tensor(dtype="torch.float32", shape=[3]), "timeout": "12", "name": "Test", "header_size": "111", @@ -63,7 +60,6 @@ class Test(bittensor.Synapse): def test_from_headers(): class Test(bittensor.Synapse): key1: typing.List[int] - key2: bittensor.Tensor # Define a mock headers dictionary to use for testing headers = { @@ -72,7 +68,6 @@ class Test(bittensor.Synapse): "bt_header_input_obj_key1": base64.b64encode( json.dumps([1, 2, 3, 4]).encode("utf-8") ).decode("utf-8"), - "bt_header_tensor_key2": "[3]-torch.float32", "timeout": "12", "name": "Test", "header_size": "111", @@ -91,8 +86,6 @@ class Test(bittensor.Synapse): assert synapse.axon.nonce == 
111 assert synapse.dendrite.ip == "12.1.1.2" assert synapse.key1 == [1, 2, 3, 4] - assert synapse.key2.shape == [3] - assert synapse.key2.dtype == "torch.float32" assert synapse.timeout == 12 assert synapse.name == "Test" assert synapse.header_size == 111 @@ -139,7 +132,6 @@ class Test(bittensor.Synapse): c: typing.Optional[int] # Not carried through headers d: typing.Optional[typing.List[int]] # Not carried through headers e: typing.List[int] # Carried through headers - f: bittensor.Tensor # Carried through headers, but not buffer. # Create an instance of the custom Synapse subclass synapse = Test( @@ -147,7 +139,6 @@ class Test(bittensor.Synapse): c=3, d=[1, 2, 3, 4], e=[1, 2, 3, 4], - f=bittensor.Tensor.serialize(torch.randn(10)), ) # Ensure the instance created is of type Test and has the expected properties @@ -158,7 +149,6 @@ class Test(bittensor.Synapse): assert synapse.c == 3 assert synapse.d == [1, 2, 3, 4] assert synapse.e == [1, 2, 3, 4] - assert synapse.f.shape == [10] # Convert the Test instance to a headers dictionary headers = synapse.to_headers() @@ -174,71 +164,6 @@ class Test(bittensor.Synapse): assert next_synapse.c == None assert next_synapse.d == None assert next_synapse.e == [] # Empty list is default for list types - assert next_synapse.f.shape == [10] # Shape is passed through - assert next_synapse.f.dtype == "torch.float32" # Type is passed through - assert next_synapse.f.buffer == None # Buffer is not passed through - - -def test_list_tensors(): - class Test(bittensor.Synapse): - a: typing.List[bittensor.Tensor] - - synapse = Test( - a=[bittensor.Tensor.serialize(torch.randn(10))], - ) - headers = synapse.to_headers() - assert "bt_header_list_tensor_a" in headers - assert headers["bt_header_list_tensor_a"] == "['[10]-torch.float32']" - next_synapse = synapse.from_headers(synapse.to_headers()) - assert next_synapse.a[0].dtype == "torch.float32" - assert next_synapse.a[0].shape == [10] - - class Test(bittensor.Synapse): - a: 
typing.List[bittensor.Tensor] - - synapse = Test( - a=[ - bittensor.Tensor.serialize(torch.randn(10)), - bittensor.Tensor.serialize(torch.randn(11)), - bittensor.Tensor.serialize(torch.randn(12)), - ], - ) - headers = synapse.to_headers() - assert "bt_header_list_tensor_a" in headers - assert ( - headers["bt_header_list_tensor_a"] - == "['[10]-torch.float32', '[11]-torch.float32', '[12]-torch.float32']" - ) - next_synapse = synapse.from_headers(synapse.to_headers()) - assert next_synapse.a[0].dtype == "torch.float32" - assert next_synapse.a[0].shape == [10] - assert next_synapse.a[1].dtype == "torch.float32" - assert next_synapse.a[1].shape == [11] - assert next_synapse.a[2].dtype == "torch.float32" - assert next_synapse.a[2].shape == [12] - - -def test_dict_tensors(): - class Test(bittensor.Synapse): - a: typing.Dict[str, bittensor.Tensor] - - synapse = Test( - a={ - "cat": bittensor.tensor(torch.randn(10)), - "dog": bittensor.tensor(torch.randn(11)), - }, - ) - headers = synapse.to_headers() - assert "bt_header_dict_tensor_a" in headers - assert ( - headers["bt_header_dict_tensor_a"] - == "['cat-[10]-torch.float32', 'dog-[11]-torch.float32']" - ) - next_synapse = synapse.from_headers(synapse.to_headers()) - assert next_synapse.a["cat"].dtype == "torch.float32" - assert next_synapse.a["cat"].shape == [10] - assert next_synapse.a["dog"].dtype == "torch.float32" - assert next_synapse.a["dog"].shape == [11] def test_body_hash_override(): @@ -263,3 +188,39 @@ def test_required_fields_override(): match='"required_hash_fields" has allow_mutation set to False and cannot be assigned', ): synapse_instance.required_hash_fields = [] + + +def test_default_instance_fields_dict_consistency(): + synapse_instance = bittensor.Synapse() + assert synapse_instance.dict() == { + "name": "Synapse", + "timeout": 12.0, + "total_size": 0, + "header_size": 0, + "dendrite": { + "status_code": None, + "status_message": None, + "process_time": None, + "ip": None, + "port": None, + "version": 
None, + "nonce": None, + "uuid": None, + "hotkey": None, + "signature": None, + }, + "axon": { + "status_code": None, + "status_message": None, + "process_time": None, + "ip": None, + "port": None, + "version": None, + "nonce": None, + "uuid": None, + "hotkey": None, + "signature": None, + }, + "computed_body_hash": "", + "required_hash_fields": [], + } diff --git a/tests/unit_tests/utils/test_utils.py b/tests/unit_tests/utils/test_utils.py index 307891a13c..cad54216c4 100644 --- a/tests/unit_tests/utils/test_utils.py +++ b/tests/unit_tests/utils/test_utils.py @@ -762,222 +762,5 @@ def test_get_explorer_url_for_network_by_network_and_block_hash( ) -class TestWalletReregister(unittest.TestCase): - _mock_subtensor: MockSubtensor - - def setUp(self): - self.subtensor = MockSubtensor() # own instance per test - - @classmethod - def setUpClass(cls) -> None: - # Keeps the same mock network for all tests. This stops the network from being re-setup for each test. - cls._mock_subtensor = MockSubtensor() - - cls._do_setup_subnet() - - @classmethod - def _do_setup_subnet(cls): - # reset the mock subtensor - cls._mock_subtensor.reset() - # Setup the mock subnet 3 - cls._mock_subtensor.create_subnet(netuid=3) - - def test_wallet_reregister_reregister_false(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - # Determine the correct string for patch based on Python version - if sys.version_info >= (3, 11): - patch_string = "bittensor.subtensor.subtensor.register" - else: - patch_string = "bittensor.subtensor.register" - - with patch(patch_string, side_effect=MockException) as mock_register: - with pytest.raises(SystemExit): # should exit because it's not registered - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - reregister=False, - ) - - mock_register.assert_not_called() # should not call register - - def 
test_wallet_reregister_reregister_false_and_registered_already(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - self._mock_subtensor.force_register_neuron( - netuid=3, - hotkey=mock_wallet.hotkey.ss58_address, - coldkey=mock_wallet.coldkeypub.ss58_address, - ) - self.assertTrue( - self._mock_subtensor.is_hotkey_registered_on_subnet( - netuid=3, - hotkey_ss58=mock_wallet.hotkey.ss58_address, - ) - ) - - # Determine the correct string for patch based on Python version - if sys.version_info >= (3, 11): - patch_string = "bittensor.subtensor.subtensor.register" - else: - patch_string = "bittensor.subtensor.register" - - with patch(patch_string, side_effect=MockException) as mock_register: - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - reregister=False, - ) # Should not exit because it's registered - - mock_register.assert_not_called() # should not call register - - def test_wallet_reregister_reregister_true_and_registered_already(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - self._mock_subtensor.force_register_neuron( - netuid=3, - hotkey=mock_wallet.hotkey.ss58_address, - coldkey=mock_wallet.coldkeypub.ss58_address, - ) - self.assertTrue( - self._mock_subtensor.is_hotkey_registered_on_subnet( - netuid=3, - hotkey_ss58=mock_wallet.hotkey.ss58_address, - ) - ) - - # Determine the correct string for patch based on Python version - if sys.version_info >= (3, 11): - patch_string = "bittensor.subtensor.subtensor.register" - else: - patch_string = "bittensor.subtensor.register" - - with patch(patch_string, side_effect=MockException) as mock_register: - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - reregister=True, - ) # Should not exit because it's registered - - mock_register.assert_not_called() # should not 
call register - - def test_wallet_reregister_no_params(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - # Determine the correct string for patch based on Python version - if sys.version_info >= (3, 11): - patch_string = "bittensor.subtensor.subtensor.register" - else: - patch_string = "bittensor.subtensor.register" - - with patch(patch_string, side_effect=MockException) as mock_register: - # Should be able to set without argument - with pytest.raises(MockException): - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - reregister=True, - # didn't pass any register params - ) - - mock_register.assert_called_once() # should call register once - - def test_wallet_reregister_use_cuda_flag_true(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - with patch("torch.cuda.is_available", return_value=True) as mock_cuda_available: - with patch( - "bittensor.extrinsics.registration.create_pow", - side_effect=MockException, - ) as mock_create_pow: - # Should be able to set without argument - with pytest.raises(MockException): - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - dev_id=0, - cuda=True, - reregister=True, - ) - - call_args = mock_create_pow.call_args - _, kwargs = call_args - - mock_create_pow.assert_called_once() - self.assertIn("cuda", kwargs) - self.assertEqual(kwargs["cuda"], True) - - def test_wallet_reregister_use_cuda_flag_false(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - with patch( - "bittensor.extrinsics.registration.create_pow", side_effect=MockException - ) as mock_create_pow: - # Should be able to set without argument - with pytest.raises(MockException): - bittensor.utils.reregister( - wallet=mock_wallet, - 
subtensor=self._mock_subtensor, - netuid=3, - dev_id=0, - cuda=False, - reregister=True, - ) - - call_args = mock_create_pow.call_args - _, kwargs = call_args - - mock_create_pow.assert_called_once() - self.assertEqual(kwargs["cuda"], False) - - def test_wallet_reregister_cuda_arg_not_specified_should_be_false(self): - mock_wallet = _generate_wallet(hotkey=_get_mock_keypair(100, self.id())) - - class MockException(Exception): - pass - - with patch( - "bittensor.extrinsics.registration.create_pow", side_effect=MockException - ) as mock_create_pow: - # Should be able to set without argument - with pytest.raises(MockException): - bittensor.utils.reregister( - wallet=mock_wallet, - subtensor=self._mock_subtensor, - netuid=3, - dev_id=0, - reregister=True, - ) - - call_args = mock_create_pow.call_args - _, kwargs = call_args - - mock_create_pow.assert_called_once() - self.assertEqual(kwargs["cuda"], False) # should be False by default - - if __name__ == "__main__": unittest.main()