diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py
new file mode 100644
index 0000000000..aa2b65fb30
--- /dev/null
+++ b/bittensor/core/async_subtensor.py
@@ -0,0 +1,1222 @@
+import asyncio
+from typing import Optional, Any, Union, TypedDict, Iterable
+
+import aiohttp
+import numpy as np
+import scalecodec
+import typer
+from bittensor_wallet import Wallet
+from bittensor_wallet.utils import SS58_FORMAT
+from rich.prompt import Confirm
+from scalecodec import GenericCall
+from scalecodec.base import RuntimeConfiguration
+from scalecodec.type_registry import load_type_registry_preset
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import (
+ DelegateInfo,
+ custom_rpc_type_registry,
+ StakeInfo,
+ NeuronInfoLite,
+ NeuronInfo,
+ SubnetHyperparameters,
+ decode_account_id,
+)
+from bittensor.core.extrinsics.async_registration import register_extrinsic
+from bittensor.core.extrinsics.async_root import (
+ set_root_weights_extrinsic,
+ root_register_extrinsic,
+)
+from bittensor.core.extrinsics.async_transfer import transfer_extrinsic
+from bittensor.core.settings import (
+ TYPE_REGISTRY,
+ DEFAULTS,
+ NETWORK_MAP,
+ DELEGATES_DETAILS_URL,
+ DEFAULT_NETWORK,
+)
+from bittensor.utils import (
+ ss58_to_vec_u8,
+ format_error_message,
+ decode_hex_identity_dict,
+ validate_chain_endpoint,
+)
+from bittensor.utils.async_substrate_interface import (
+ AsyncSubstrateInterface,
+ TimeoutException,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+from bittensor.utils.delegates_details import DelegatesDetails
+
+
class ParamWithTypes(TypedDict):
    """Shape of one runtime-call parameter entry used by ``encode_params``."""

    name: str  # Name of the parameter.
    type: str  # ScaleType string of the parameter.
+
+
class ProposalVoteData:
    """Senate proposal vote data decoded from the chain's raw proposal mapping."""

    index: int
    threshold: int
    ayes: list[str]
    nays: list[str]
    end: int

    def __init__(self, proposal_dict: dict) -> None:
        """Populate vote fields from a decoded proposal storage dict."""
        self.index = proposal_dict["index"]
        self.threshold = proposal_dict["threshold"]
        self.ayes = self.decode_ss58_tuples(proposal_dict["ayes"])
        self.nays = self.decode_ss58_tuples(proposal_dict["nays"])
        self.end = proposal_dict["end"]

    @staticmethod
    def decode_ss58_tuples(line: tuple):
        """Decodes a tuple of ss58 addresses formatted as bytes tuples."""
        return [decode_account_id(entry[0]) for entry in line]
+
+
+class AsyncSubtensor:
+ """Thin layer for interacting with Substrate Interface. Mostly a collection of frequently-used calls."""
+
    def __init__(self, network: str = DEFAULT_NETWORK):
        """
        Resolves the network name/endpoint pair and builds the underlying
        ``AsyncSubstrateInterface`` (connection is opened later, in ``__aenter__``).

        :param network: a known network name (key of ``NETWORK_MAP``) or a raw
            chain endpoint URL; anything else falls back to the default network.
        """
        # Case 1: a known network name — look up its canonical endpoint.
        if network in NETWORK_MAP:
            self.chain_endpoint = NETWORK_MAP[network]
            self.network = network
            if network == "local":
                logging.warning(
                    "[yellow]Warning[/yellow]: Verify your local subtensor is running on port 9944."
                )
        else:
            # Case 2: treat the argument as a raw endpoint URL.
            is_valid, _ = validate_chain_endpoint(network)
            if is_valid:
                self.chain_endpoint = network
                # If the endpoint matches a known network, recover its name.
                if network in NETWORK_MAP.values():
                    self.network = next(
                        key for key, value in NETWORK_MAP.items() if value == network
                    )
                else:
                    self.network = "custom"
            else:
                # Case 3: unusable input — fall back to the configured default.
                logging.info(
                    f"Network not specified or not valid. Using default chain endpoint: {NETWORK_MAP[DEFAULTS.subtensor.network]}."
                )
                logging.info(
                    "You can set this for commands with the --network flag, or by setting this in the config."
                )
                self.chain_endpoint = NETWORK_MAP[DEFAULTS.subtensor.network]
                self.network = DEFAULTS.subtensor.network

        self.substrate = AsyncSubstrateInterface(
            chain_endpoint=self.chain_endpoint,
            ss58_format=SS58_FORMAT,
            type_registry=TYPE_REGISTRY,
            chain_name="Bittensor",
        )
+
+ def __str__(self):
+ return f"Network: {self.network}, Chain: {self.chain_endpoint}"
+
    async def __aenter__(self):
        """
        Opens the substrate connection and returns this AsyncSubtensor.

        :raises typer.Exit: if the connection attempt times out.
        """
        logging.info(
            f"Connecting to Substrate: {self}..."
        )
        try:
            # NOTE(review): `return self` from inside `async with self.substrate`
            # triggers substrate.__aexit__ before the caller's block runs — confirm
            # AsyncSubstrateInterface keeps its connection usable after its own
            # context exits, otherwise the connection may be torn down here.
            async with self.substrate:
                return self
        except TimeoutException:
            logging.error(
                f"Error: Timeout occurred connecting to substrate. Verify your chain and network settings: {self}"
            )
            # NOTE(review): raising typer.Exit couples this core module to a CLI
            # framework; library callers may expect a library-level exception.
            raise typer.Exit(code=1)
+
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Closes the underlying substrate connection on context exit."""
        await self.substrate.close()
+
    async def encode_params(
        self,
        call_definition: dict[str, Any],
        params: Union[list[Any], dict[str, Any]],
    ) -> str:
        """Returns a hex encoded string of the params using their types.

        :param call_definition: runtime-call definition mapping; its ``"params"``
            key holds a list of ``ParamWithTypes`` entries (annotation corrected
            from ``list["ParamWithTypes"]`` — the body indexes it as a mapping).
        :param params: argument values, positional (list) or by name (dict).
        :return: the SCALE-encoded parameters as a hex string.
        :raises ValueError: if a named parameter is missing from ``params``.
        """
        param_data = scalecodec.ScaleBytes(b"")

        for i, param in enumerate(call_definition["params"]):
            scale_obj = await self.substrate.create_scale_object(param["type"])
            if isinstance(params, list):
                param_data += scale_obj.encode(params[i])
            else:
                if param["name"] not in params:
                    raise ValueError(f"Missing param {param['name']} in params dict.")

                param_data += scale_obj.encode(params[param["name"]])

        return param_data.to_hex()
+
+ async def get_all_subnet_netuids(
+ self, block_hash: Optional[str] = None
+ ) -> list[int]:
+ """
+ Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network.
+
+ :param block_hash: The hash of the block to retrieve the subnet unique identifiers from.
+ :return: A list of subnet netuids.
+
+ This function provides a comprehensive view of the subnets within the Bittensor network,
+ offering insights into its diversity and scale.
+ """
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ block_hash=block_hash,
+ reuse_block_hash=True,
+ )
+ return (
+ []
+ if result is None or not hasattr(result, "records")
+ else [netuid async for netuid, exists in result if exists]
+ )
+
+ async def is_hotkey_delegate(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: Optional[bool] = False,
+ ) -> bool:
+ """
+ Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function
+ checks if the neuron associated with the hotkey is part of the network's delegation system.
+
+ :param hotkey_ss58: The SS58 address of the neuron's hotkey.
+ :param block_hash: The hash of the blockchain block number for the query.
+ :param reuse_block: Whether to reuse the last-used block hash.
+
+ :return: `True` if the hotkey is a delegate, `False` otherwise.
+
+ Being a delegate is a significant status within the Bittensor network, indicating a neuron's
+ involvement in consensus and governance processes.
+ """
+ delegates = await self.get_delegates(
+ block_hash=block_hash, reuse_block=reuse_block
+ )
+ return hotkey_ss58 in [info.hotkey_ss58 for info in delegates]
+
+ async def get_delegates(
+ self, block_hash: Optional[str] = None, reuse_block: Optional[bool] = False
+ ) -> list[DelegateInfo]:
+ """
+ Fetches all delegates on the chain
+
+ :param block_hash: hash of the blockchain block number for the query.
+ :param reuse_block: whether to reuse the last-used block hash.
+
+ :return: List of DelegateInfo objects, or an empty list if there are no delegates.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="DelegateInfoRuntimeApi",
+ method="get_delegates",
+ params=[],
+ block_hash=block_hash,
+ )
+ if hex_bytes_result is not None:
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return DelegateInfo.list_from_vec_u8(bytes_result)
+ else:
+ return []
+
+ async def get_stake_info_for_coldkey(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[StakeInfo]:
+ """
+ Retrieves stake information associated with a specific coldkey. This function provides details
+ about the stakes held by an account, including the staked amounts and associated delegates.
+
+ :param coldkey_ss58: The ``SS58`` address of the account's coldkey.
+ :param block_hash: The hash of the blockchain block number for the query.
+ :param reuse_block: Whether to reuse the last-used block hash.
+
+ :return: A list of StakeInfo objects detailing the stake allocations for the account.
+
+ Stake information is vital for account holders to assess their investment and participation
+ in the network's delegation and consensus processes.
+ """
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="StakeInfoRuntimeApi",
+ method="get_stake_info_for_coldkey",
+ params=[encoded_coldkey],
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return StakeInfo.list_from_vec_u8(bytes_result)
+
+ async def get_stake_for_coldkey_and_hotkey(
+ self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str]
+ ) -> Balance:
+ """
+ Retrieves stake information associated with a specific coldkey and hotkey.
+ :param hotkey_ss58: the hotkey SS58 address to query
+ :param coldkey_ss58: the coldkey SS58 address to query
+ :param block_hash: the hash of the blockchain block number for the query.
+ :return: Stake Balance for the given coldkey and hotkey
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Stake",
+ params=[hotkey_ss58, coldkey_ss58],
+ block_hash=block_hash,
+ )
+ return Balance.from_rao(_result or 0)
+
+ async def query_runtime_api(
+ self,
+ runtime_api: str,
+ method: str,
+ params: Optional[Union[list[list[int]], dict[str, int]]],
+ block_hash: Optional[str] = None,
+ reuse_block: Optional[bool] = False,
+ ) -> Optional[str]:
+ """
+ Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying
+ runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users
+ who need to interact with specific runtime methods and decode complex data types.
+
+ :param runtime_api: The name of the runtime API to query.
+ :param method: The specific method within the runtime API to call.
+ :param params: The parameters to pass to the method call.
+ :param block_hash: The hash of the blockchain block number at which to perform the query.
+ :param reuse_block: Whether to reuse the last-used block hash.
+
+ :return: The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails.
+
+ This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed
+ and specific interactions with the network's runtime environment.
+ """
+ call_definition = TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][method]
+
+ data = (
+ "0x"
+ if params is None
+ else await self.encode_params(
+ call_definition=call_definition, params=params
+ )
+ )
+ api_method = f"{runtime_api}_{method}"
+
+ json_result = await self.substrate.rpc_request(
+ method="state_call",
+ params=[api_method, data, block_hash] if block_hash else [api_method, data],
+ )
+
+ if json_result is None:
+ return None
+
+ return_type = call_definition["type"]
+
+ as_scale_bytes = scalecodec.ScaleBytes(json_result["result"]) # type: ignore
+
+ rpc_runtime_config = RuntimeConfiguration()
+ rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy"))
+ rpc_runtime_config.update_type_registry(custom_rpc_type_registry)
+
+ obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes)
+ if obj.data.to_hex() == "0x0400": # RPC returned None result
+ return None
+
+ return obj.decode()
+
+ async def get_balance(
+ self,
+ *addresses: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict[str, Balance]:
+ """
+ Retrieves the balance for given coldkey(s)
+ :param addresses: coldkey addresses(s)
+ :param block_hash: the block hash, optional
+ :param reuse_block: Whether to reuse the last-used block hash when retrieving info.
+ :return: dict of {address: Balance objects}
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "System", "Account", [address], block_hash=block_hash
+ )
+ )
+ for address in addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ value = item[1] or {"data": {"free": 0}}
+ results.update({item[0].params[0]: Balance(value["data"]["free"])})
+ return results
+
+ async def get_total_stake_for_coldkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a coldkey.
+
+ :param ss58_addresses: The SS58 address(es) of the coldkey(s)
+ :param block_hash: The hash of the block number to retrieve the stake from.
+ :param reuse_block: Whether to reuse the last-used block hash when retrieving info.
+
+ :return: {address: Balance objects}
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "SubtensorModule",
+ "TotalColdkeyStake",
+ [address],
+ block_hash=block_hash,
+ )
+ )
+ for address in ss58_addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ results.update({item[0].params[0]: Balance.from_rao(item[1] or 0)})
+ return results
+
+ async def get_total_stake_for_hotkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a hotkey.
+
+ :param ss58_addresses: The SS58 address(es) of the hotkey(s)
+ :param block_hash: The hash of the block number to retrieve the stake from.
+ :param reuse_block: Whether to reuse the last-used block hash when retrieving info.
+
+ :return: {address: Balance objects}
+ """
+ results = await self.substrate.query_multiple(
+ params=[s for s in ss58_addresses],
+ module="SubtensorModule",
+ storage_function="TotalHotkeyStake",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return {k: Balance.from_rao(r or 0) for (k, r) in results.items()}
+
+ async def get_netuids_for_hotkey(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function
+ identifies the specific subnets within the Bittensor network where the neuron associated with
+ the hotkey is active.
+
+ :param hotkey_ss58: The ``SS58`` address of the neuron's hotkey.
+ :param block_hash: The hash of the blockchain block number at which to perform the query.
+ :param reuse_block: Whether to reuse the last-used block hash when retrieving info.
+
+ :return: A list of netuids where the neuron is a member.
+ """
+
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="IsNetworkMember",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return (
+ [record[0] async for record in result if record[1]]
+ if result and hasattr(result, "records")
+ else []
+ )
+
    async def subnet_exists(
        self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
    ) -> bool:
        """
        Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network.

        :param netuid: The unique identifier of the subnet.
        :param block_hash: The hash of the blockchain block number at which to check the subnet existence.
        :param reuse_block: Whether to reuse the last-used block hash.

        :return: `True` if the subnet exists, `False` otherwise.
        """
        result = await self.substrate.query(
            module="SubtensorModule",
            storage_function="NetworksAdded",
            params=[netuid],
            block_hash=block_hash,
            reuse_block_hash=reuse_block,
        )
        # NOTE(review): the raw decoded storage value is returned as-is; callers
        # rely on its truthiness. Confirm it is actually a bool as annotated.
        return result
+
+ async def get_hyperparameter(
+ self,
+ param_name: str,
+ netuid: int,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional[Any]:
+ """
+ Retrieves a specified hyperparameter for a specific subnet.
+
+ :param param_name: The name of the hyperparameter to retrieve.
+ :param netuid: The unique identifier of the subnet.
+ :param block_hash: The hash of blockchain block number for the query.
+ :param reuse_block: Whether to reuse the last-used block hash.
+
+ :return: The value of the specified hyperparameter if the subnet exists, or None
+ """
+ if not await self.subnet_exists(netuid, block_hash):
+ print("subnet does not exist")
+ return None
+
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function=param_name,
+ params=[netuid],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ if result is None:
+ return None
+
+ return result
+
+ async def filter_netuids_by_registered_hotkeys(
+ self,
+ all_netuids: Iterable[int],
+ filter_for_netuids: Iterable[int],
+ all_hotkeys: Iterable[Wallet],
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Filters a given list of all netuids for certain specified netuids and hotkeys
+
+ :param all_netuids: A list of netuids to filter.
+ :param filter_for_netuids: A subset of all_netuids to filter from the main list
+ :param all_hotkeys: Hotkeys to filter from the main list
+ :param block_hash: hash of the blockchain block number at which to perform the query.
+ :param reuse_block: whether to reuse the last-used blockchain hash when retrieving info.
+
+ :return: the filtered list of netuids.
+ """
+ netuids_with_registered_hotkeys = [
+ item
+ for sublist in await asyncio.gather(
+ *[
+ self.get_netuids_for_hotkey(
+ wallet.hotkey.ss58_address,
+ reuse_block=reuse_block,
+ block_hash=block_hash,
+ )
+ for wallet in all_hotkeys
+ ]
+ )
+ for item in sublist
+ ]
+
+ if not filter_for_netuids:
+ all_netuids = netuids_with_registered_hotkeys
+
+ else:
+ filtered_netuids = [
+ netuid for netuid in all_netuids if netuid in filter_for_netuids
+ ]
+
+ registered_hotkeys_filtered = [
+ netuid
+ for netuid in netuids_with_registered_hotkeys
+ if netuid in filter_for_netuids
+ ]
+
+ # Combine both filtered lists
+ all_netuids = filtered_netuids + registered_hotkeys_filtered
+
+ return list(set(all_netuids))
+
+ async def get_existential_deposit(
+ self, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> Balance:
+ """
+ Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit
+ is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with
+ balances below this threshold can be reaped to conserve network resources.
+
+ :param block_hash: Block hash at which to query the deposit amount. If `None`, the current block is used.
+ :param reuse_block: Whether to reuse the last-used blockchain block hash.
+
+ :return: The existential deposit amount
+
+ The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring
+ efficient use of storage and preventing the proliferation of dust accounts.
+ """
+ result = await self.substrate.get_constant(
+ module_name="Balances",
+ constant_name="ExistentialDeposit",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ if result is None:
+ raise Exception("Unable to retrieve existential deposit amount.")
+
+ return Balance.from_rao(result)
+
+ async def neurons(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[NeuronInfo]:
+ """
+ Retrieves a list of all neurons within a specified subnet of the Bittensor network. This function
+ provides a snapshot of the subnet's neuron population, including each neuron's attributes and network
+ interactions.
+
+ :param netuid: The unique identifier of the subnet.
+ :param block_hash: The hash of the blockchain block number for the query.
+
+ :return: A list of NeuronInfo objects detailing each neuron's characteristics in the subnet.
+
+ Understanding the distribution and status of neurons within a subnet is key to comprehending the
+ network's decentralized structure and the dynamics of its consensus and governance processes.
+ """
+ neurons_lite, weights, bonds = await asyncio.gather(
+ self.neurons_lite(netuid=netuid, block_hash=block_hash),
+ self.weights(netuid=netuid, block_hash=block_hash),
+ self.bonds(netuid=netuid, block_hash=block_hash),
+ )
+
+ weights_as_dict = {uid: w for uid, w in weights}
+ bonds_as_dict = {uid: b for uid, b in bonds}
+
+ neurons = [
+ NeuronInfo.from_weights_bonds_and_neuron_lite(
+ neuron_lite, weights_as_dict, bonds_as_dict
+ )
+ for neuron_lite in neurons_lite
+ ]
+
+ return neurons
+
+ async def neurons_lite(
+ self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> list[NeuronInfoLite]:
+ """
+ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network.
+ This function provides a streamlined view of the neurons, focusing on key attributes such as stake
+ and network participation.
+
+ :param netuid: The unique identifier of the subnet.
+ :param block_hash: The hash of the blockchain block number for the query.
+ :param reuse_block: Whether to reuse the last-used blockchain block hash.
+
+ :return: A list of simplified neuron information for the subnet.
+
+ This function offers a quick overview of the neuron population within a subnet, facilitating
+ efficient analysis of the network's decentralized structure and neuron dynamics.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="NeuronInfoRuntimeApi",
+ method="get_neurons_lite",
+ params=[
+ netuid
+ ], # TODO check to see if this can accept more than one at a time
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return NeuronInfoLite.list_from_vec_u8(bytes_result)
+
+ async def neuron_for_uid(
+ self, uid: Optional[int], netuid: int, block_hash: Optional[str] = None
+ ) -> NeuronInfo:
+ """
+ Retrieves detailed information about a specific neuron identified by its unique identifier (UID)
+ within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive
+ view of a neuron's attributes, including its stake, rank, and operational status.
+
+
+ :param uid: The unique identifier of the neuron.
+ :param netuid: The unique identifier of the subnet.
+ :param block_hash: The hash of the blockchain block number for the query.
+
+ :return: Detailed information about the neuron if found, a null neuron otherwise
+
+ This function is crucial for analyzing individual neurons' contributions and status within a specific
+ subnet, offering insights into their roles in the network's consensus and validation mechanisms.
+ """
+ if uid is None:
+ return NeuronInfo.get_null_neuron()
+
+ params = [netuid, uid, block_hash] if block_hash else [netuid, uid]
+ json_body = await self.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params, # custom rpc method
+ )
+
+ if not (result := json_body.get("result", None)):
+ return NeuronInfo.get_null_neuron()
+
+ bytes_result = bytes(result)
+ return NeuronInfo.from_vec_u8(bytes_result)
+
+ async def get_delegated(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[tuple[DelegateInfo, Balance]]:
+ """
+ Retrieves a list of delegates and their associated stakes for a given coldkey. This function
+ identifies the delegates that a specific account has staked tokens on.
+
+ :param coldkey_ss58: The `SS58` address of the account's coldkey.
+ :param block_hash: The hash of the blockchain block number for the query.
+ :param reuse_block: Whether to reuse the last-used blockchain block hash.
+
+ :return: A list of tuples, each containing a delegate's information and staked amount.
+
+ This function is important for account holders to understand their stake allocations and their
+ involvement in the network's delegation and consensus mechanisms.
+ """
+
+ block_hash = (
+ block_hash
+ if block_hash
+ else (self.substrate.last_block_hash if reuse_block else None)
+ )
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+ json_body = await self.substrate.rpc_request(
+ method="delegateInfo_getDelegated",
+ params=([block_hash, encoded_coldkey] if block_hash else [encoded_coldkey]),
+ )
+
+ if not (result := json_body.get("result")):
+ return []
+
+ return DelegateInfo.delegated_list_from_vec_u8(bytes(result))
+
    async def query_identity(
        self,
        key: str,
        block_hash: Optional[str] = None,
        reuse_block: bool = False,
    ) -> dict:
        """
        Queries the on-chain identity (Registry pallet ``IdentityOf``) for the
        given key and returns it as a decoded dict.

        Note:
            See the `Bittensor CLI documentation `_ for supported identity
            parameters.

        :param key: The key used to query the neuron's identity, typically the neuron's SS58 address.
        :param block_hash: The hash of the blockchain block number at which to perform the query.
        :param reuse_block: Whether to reuse the last-used blockchain block hash.

        :return: a dict of decoded identity fields, or ``{}`` when no identity
            is registered for the key.
        """

        def decode_hex_identity_dict_(info_dictionary):
            # Normalizes each identity field in place: raw byte tuples become
            # either a spaced hex string (multi-element) or a UTF-8 string
            # (single-element); everything else is passed through.
            for k, v in info_dictionary.items():
                # Single-variant dicts (e.g. {"Raw0": ...}) unwrap to their value.
                if isinstance(v, dict):
                    item = next(iter(v.values()))
                else:
                    item = v
                if isinstance(item, tuple) and item:
                    if len(item) > 1:
                        try:
                            info_dictionary[k] = (
                                bytes(item).hex(sep=" ", bytes_per_sep=2).upper()
                            )
                        # NOTE(review): bytes.hex() does not raise
                        # UnicodeDecodeError — this guard looks unreachable;
                        # bytes(item) raising ValueError seems the likelier
                        # failure mode. Confirm intent.
                        except UnicodeDecodeError:
                            print(f"Could not decode: {k}: {item}")
                    else:
                        try:
                            info_dictionary[k] = bytes(item[0]).decode("utf-8")
                        except UnicodeDecodeError:
                            # NOTE(review): library code printing to stdout —
                            # consider routing through `logging` instead.
                            print(f"Could not decode: {k}: {item}")
                else:
                    info_dictionary[k] = item

            return info_dictionary

        identity_info = await self.substrate.query(
            module="Registry",
            storage_function="IdentityOf",
            params=[key],
            block_hash=block_hash,
            reuse_block_hash=reuse_block,
        )
        try:
            return decode_hex_identity_dict_(identity_info["info"])
        except TypeError:
            # No identity registered: the query returned None (unsubscriptable).
            return {}
+
+ async def weights(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network.
+ This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the
+ network's trust and value assignment mechanisms.
+
+ Args:
+ :param netuid: The network UID of the subnet to query.
+ :param block_hash: The hash of the blockchain block for the query.
+
+ :return: A list of tuples mapping each neuron's UID to its assigned weights.
+
+ The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons,
+ influencing their influence and reward allocation within the subnet.
+ """
+ # TODO look into seeing if we can speed this up with storage query
+ w_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Weights",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ w_map = [(uid, w or []) async for uid, w in w_map_encoded]
+
+ return w_map
+
+ async def bonds(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network.
+ Bonds represent the investments or commitments made by neurons in one another, indicating a level
+ of trust and perceived value. This bonding mechanism is integral to the network's market-based approach
+ to measuring and rewarding machine intelligence.
+
+ :param netuid: The network UID of the subnet to query.
+ :param block_hash: The hash of the blockchain block number for the query.
+
+ :return: list of tuples mapping each neuron's UID to its bonds with other neurons.
+
+ Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior
+ within the subnet. It reflects how neurons recognize and invest in each other's intelligence and
+ contributions, supporting diverse and niche systems within the Bittensor ecosystem.
+ """
+ b_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Bonds",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ b_map = [(uid, b) async for uid, b in b_map_encoded]
+
+ return b_map
+
+ async def does_hotkey_exist(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> bool:
+ """
+ Returns true if the hotkey is known by the chain and there are accounts.
+
+ :param hotkey_ss58: The SS58 address of the hotkey.
+ :param block_hash: The hash of the block number to check the hotkey against.
+ :param reuse_block: Whether to reuse the last-used blockchain hash.
+
+ :return: `True` if the hotkey is known by the chain and there are accounts, `False` otherwise.
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ result = decode_account_id(_result[0])
+ return_val = (
+ False
+ if result is None
+ else result != "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM"
+ )
+ return return_val
+
+ async def get_hotkey_owner(
+ self, hotkey_ss58: str, block_hash: str
+ ) -> Optional[str]:
+ hk_owner_query = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ )
+ val = decode_account_id(hk_owner_query[0])
+ if val:
+ exists = await self.does_hotkey_exist(hotkey_ss58, block_hash=block_hash)
+ else:
+ exists = False
+ hotkey_owner = val if exists else None
+ return hotkey_owner
+
+ async def sign_and_send_extrinsic(
+ self,
+ call: GenericCall,
+ wallet: Wallet,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ ) -> tuple[bool, str]:
+ """
+ Helper method to sign and submit an extrinsic call to chain.
+
+ :param call: a prepared Call object
+ :param wallet: the wallet whose coldkey will be used to sign the extrinsic
+ :param wait_for_inclusion: whether to wait until the extrinsic call is included on the chain
+ :param wait_for_finalization: whether to wait until the extrinsic call is finalized on the chain
+
+ :return: (success, error message)
+ """
+ extrinsic = await self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ ) # sign with coldkey
+ try:
+ response = await self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, ""
+ await response.process_events()
+ if await response.is_success:
+ return True, ""
+ else:
+ return False, format_error_message(
+ await response.error_message, substrate=self.substrate
+ )
+ except SubstrateRequestException as e:
+ return False, format_error_message(e, substrate=self.substrate)
+
+ async def get_children(self, hotkey, netuid) -> tuple[bool, list, str]:
+ """
+ This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys
+ storage function to get the children and formats them before returning as a tuple.
+
+ :param hotkey: The hotkey value.
+ :param netuid: The netuid value.
+
+ :return: A tuple containing a boolean indicating success or failure, a list of formatted children, and an error
+ message (if applicable)
+ """
+ try:
+ children = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[hotkey, netuid],
+ )
+ if children:
+ formatted_children = []
+ for proportion, child in children:
+ # Convert U64 to int
+ formatted_child = decode_account_id(child[0])
+ int_proportion = int(proportion)
+ formatted_children.append((int_proportion, formatted_child))
+ return True, formatted_children, ""
+ else:
+ return True, [], ""
+ except SubstrateRequestException as e:
+ return False, [], format_error_message(e, self.substrate)
+
+ async def get_subnet_hyperparameters(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> Optional[Union[list, SubnetHyperparameters]]:
+ """
+ Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters
+ define the operational settings and rules governing the subnet's behavior.
+
+ :param netuid: The network UID of the subnet to query.
+ :param block_hash: The hash of the blockchain block number for the query.
+
+ :return: The subnet's hyperparameters, or `None` if not available.
+
+ Understanding the hyperparameters is crucial for comprehending how subnets are configured and
+ managed, and how they interact with the network's consensus and incentive mechanisms.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ if hex_bytes_result.startswith("0x"):
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ else:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return SubnetHyperparameters.from_vec_u8(bytes_result)
+
+ async def get_vote_data(
+ self,
+ proposal_hash: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional["ProposalVoteData"]:
+ """
+ Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes
+ information about how senate members have voted on the proposal.
+
+ :param proposal_hash: The hash of the proposal for which voting data is requested.
+ :param block_hash: The hash of the blockchain block number to query the voting data.
+ :param reuse_block: Whether to reuse the last-used blockchain block hash.
+
+ :return: An object containing the proposal's voting data, or `None` if not found.
+
+ This function is important for tracking and understanding the decision-making processes within
+ the Bittensor network, particularly how proposals are received and acted upon by the governing body.
+ """
+ vote_data = await self.substrate.query(
+ module="Triumvirate",
+ storage_function="Voting",
+ params=[proposal_hash],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ if vote_data is None:
+ return None
+ else:
+ return ProposalVoteData(vote_data)
+
    async def get_delegate_identities(
        self, block_hash: Optional[str] = None
    ) -> dict[str, DelegatesDetails]:
        """
        Fetches delegates identities from the chain and GitHub. Preference is given to chain data, and missing info
        is filled-in by the info from GitHub. At some point, we want to totally move away from fetching this info
        from GitHub, but chain data is still limited in that regard.

        Args:
            block_hash: the hash of the blockchain block for the query

        Returns: {ss58: DelegatesDetails, ...}

        """
        # 10-second overall timeout for the GitHub request; the chain query and
        # the HTTP fetch run concurrently via asyncio.gather.
        timeout = aiohttp.ClientTimeout(10.0)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            identities_info, response = await asyncio.gather(
                self.substrate.query_map(
                    module="Registry",
                    storage_function="IdentityOf",
                    block_hash=block_hash,
                ),
                session.get(DELEGATES_DETAILS_URL),
            )

            # Build the chain-sourced map first: chain data takes precedence over
            # the GitHub fallback merged in below.
            all_delegates_details = {
                decode_account_id(ss58_address[0]): DelegatesDetails.from_chain_data(
                    decode_hex_identity_dict(identity["info"])
                )
                for ss58_address, identity in identities_info
            }

            # Best-effort GitHub merge: if the request failed (non-2xx), the
            # GitHub data is silently skipped and chain data alone is returned.
            if response.ok:
                all_delegates: dict[str, Any] = await response.json(content_type=None)

                for delegate_hotkey, delegate_details in all_delegates.items():
                    # setdefault keeps an existing chain-sourced entry intact and
                    # only inserts a GitHub-backed record for unknown hotkeys.
                    delegate_info = all_delegates_details.setdefault(
                        delegate_hotkey,
                        DelegatesDetails(
                            display=delegate_details.get("name", ""),
                            web=delegate_details.get("url", ""),
                            additional=delegate_details.get("description", ""),
                            pgp_fingerprint=delegate_details.get("fingerprint", ""),
                        ),
                    )
                    # For entries that already existed, fill only the fields the
                    # chain left empty ("or" keeps non-empty chain values).
                    delegate_info.display = (
                        delegate_info.display or delegate_details.get("name", "")
                    )
                    delegate_info.web = delegate_info.web or delegate_details.get(
                        "url", ""
                    )
                    delegate_info.additional = (
                        delegate_info.additional
                        or delegate_details.get("description", "")
                    )
                    delegate_info.pgp_fingerprint = (
                        delegate_info.pgp_fingerprint
                        or delegate_details.get("fingerprint", "")
                    )

            return all_delegates_details
+
+ async def is_hotkey_registered(self, netuid: int, hotkey_ss58: str) -> bool:
+ """Checks to see if the hotkey is registered on a given netuid"""
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ )
+ if _result is not None:
+ return True
+ else:
+ return False
+
+ # extrinsics
+
+ async def transfer(
+ self,
+ wallet: Wallet,
+ destination: str,
+ amount: float,
+ transfer_all: bool,
+ prompt: bool,
+ ):
+ """Transfer token of amount to destination."""
+ return await transfer_extrinsic(
+ self,
+ wallet,
+ destination,
+ Balance.from_tao(amount),
+ transfer_all,
+ prompt=prompt,
+ )
+
+ async def register(self, wallet: Wallet, prompt: bool):
+ """Register neuron by recycling some TAO."""
+ logging.info(
+ f"Registering on netuid 0 on network: {self.network}"
+ )
+
+ # Check current recycle amount
+ logging.info("Fetching recycle amount & balance.")
+ recycle_call, balance_ = await asyncio.gather(
+ self.get_hyperparameter(param_name="Burn", netuid=0, reuse_block=True),
+ self.get_balance(wallet.coldkeypub.ss58_address, reuse_block=True),
+ )
+ current_recycle = Balance.from_rao(int(recycle_call))
+ try:
+ balance: Balance = balance_[wallet.coldkeypub.ss58_address]
+ except TypeError as e:
+ logging.error(f"Unable to retrieve current recycle. {e}")
+ return False
+ except KeyError:
+ logging.error("Unable to retrieve current balance.")
+ return False
+
+ # Check balance is sufficient
+ if balance < current_recycle:
+ logging.error(
+ f"Insufficient balance {balance} to register neuron. Current recycle is {current_recycle} TAO"
+ )
+ return False
+
+ if prompt:
+ if not Confirm.ask(
+ f"Your balance is: [bold green]{balance}[/bold green]\n"
+ f"The cost to register by recycle is [bold red]{current_recycle}[/bold red]\n"
+ f"Do you want to continue?",
+ default=False,
+ ):
+ return False
+
+ return await root_register_extrinsic(
+ self,
+ wallet,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ prompt=prompt,
+ )
+
    async def pow_register(
        self: "AsyncSubtensor",
        wallet: Wallet,
        netuid,
        processors,
        update_interval,
        output_in_place,
        verbose,
        use_cuda,
        dev_id,
        threads_per_block,
    ):
        """Register a neuron on a subnet via proof-of-work.

        Thin wrapper around :func:`register_extrinsic`; note it always passes
        ``prompt=True``, so the user is asked to confirm before submitting.

        :param wallet: wallet whose hotkey/coldkey are registered
        :param netuid: subnet to register on
        :param processors: number of CPU processes used to solve the POW
        :param update_interval: nonces to try before checking for a new block
        :param output_in_place: whether solver progress is rendered in place
        :param verbose: enables extra solver logging
        :param use_cuda: solve the POW on CUDA device(s)
        :param dev_id: CUDA device id (or list of ids)
        :param threads_per_block: CUDA threads per block
        """
        return await register_extrinsic(
            subtensor=self,
            wallet=wallet,
            netuid=netuid,
            prompt=True,
            tpb=threads_per_block,
            update_interval=update_interval,
            num_processes=processors,
            cuda=use_cuda,
            dev_id=dev_id,
            output_in_place=output_in_place,
            log_verbose=verbose,
        )
+
+ async def set_weights(
+ self,
+ wallet: "Wallet",
+ netuids: list[int],
+ weights: list[float],
+ prompt: bool,
+ ):
+ """Set weights for root network."""
+ netuids_ = np.array(netuids, dtype=np.int64)
+ weights_ = np.array(weights, dtype=np.float32)
+ logging.info(f"Setting weights in network: {self.network}")
+ # Run the set weights operation.
+ return await set_root_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuids=netuids_,
+ weights=weights_,
+ version_key=0,
+ prompt=prompt,
+ wait_for_finalization=True,
+ wait_for_inclusion=True,
+ )
diff --git a/bittensor/core/chain_data/__init__.py b/bittensor/core/chain_data/__init__.py
index 9ad1e38881..68936a6b5f 100644
--- a/bittensor/core/chain_data/__init__.py
+++ b/bittensor/core/chain_data/__init__.py
@@ -17,6 +17,6 @@
from .stake_info import StakeInfo
from .subnet_hyperparameters import SubnetHyperparameters
from .subnet_info import SubnetInfo
-from .utils import custom_rpc_type_registry
+from .utils import custom_rpc_type_registry, decode_account_id, process_stake_data
ProposalCallData = GenericCall
diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py
new file mode 100644
index 0000000000..4da7785b1b
--- /dev/null
+++ b/bittensor/core/extrinsics/async_registration.py
@@ -0,0 +1,1609 @@
+import asyncio
+import binascii
+import functools
+import hashlib
+import io
+import math
+import multiprocessing as mp
+import os
+import random
+import subprocess
+import time
+import typing
+from contextlib import redirect_stdout
+from dataclasses import dataclass
+from datetime import timedelta
+from multiprocessing import Process, Event, Lock, Array, Value, Queue
+from multiprocessing.queues import Queue as Queue_Type
+from queue import Empty, Full
+from typing import Optional
+
+import backoff
+import numpy as np
+from Crypto.Hash import keccak
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from rich.console import Console
+from rich.prompt import Confirm
+from rich.status import Status
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import NeuronInfo
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.formatting import millify, get_human_readable
+
+if typing.TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
# TODO: compare and remove existing code (bittensor.utils.registration)
+
+
def use_torch() -> bool:
    """Return True when the ``USE_TORCH`` environment variable is set to "1".

    Forces the use of torch over numpy for certain operations.
    """
    # Direct comparison already yields a bool; no ternary needed.
    return os.getenv("USE_TORCH") == "1"
+
+
def legacy_torch_api_compat(func: typing.Callable):
    """
    Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True.

    :param func: Function with numpy Input/Output to be decorated.

    :return: Decorated function
    """

    @functools.wraps(func)
    def decorated(*args, **kwargs):
        if use_torch():
            # if argument is a Torch tensor, convert it to numpy
            # (.cpu() first, so device tensors are moved to host memory)
            args = [
                arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg
                for arg in args
            ]
            kwargs = {
                key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value
                for key, value in kwargs.items()
            }
        ret = func(*args, **kwargs)
        # NOTE(review): use_torch() is re-evaluated here; if USE_TORCH changes
        # mid-call the input/output conversions could disagree — confirm intended.
        if use_torch():
            # if return value is a numpy array, convert it to Torch tensor
            if isinstance(ret, np.ndarray):
                ret = torch.from_numpy(ret)
        return ret

    return decorated
+
+
+@functools.cache
+def _get_real_torch():
+ try:
+ import torch as _real_torch
+ except ImportError:
+ _real_torch = None
+ return _real_torch
+
+
def log_no_torch_error():
    """Log an informational message telling the user how to install torch."""
    logging.info(
        "This command requires torch. You can install torch with `pip install torch` and run the command again."
    )
+
+
@dataclass
class POWSolution:
    """A solution to the registration PoW problem."""

    nonce: int  # nonce that satisfies the difficulty
    block_number: int  # block the seal was computed against
    difficulty: int  # difficulty the solution was solved for
    seal: bytes  # resulting seal

    async def is_stale(self, subtensor: "AsyncSubtensor") -> bool:
        """Returns True if the POW is stale.
        This means the block the POW was solved for is more than 3 blocks behind the current block.
        """
        current_block = await subtensor.substrate.get_block_number(None)
        return self.block_number < current_block - 3
+
+
@dataclass
class RegistrationStatistics:
    """Statistics for a registration."""

    time_spent_total: float  # seconds spent across all rounds
    rounds_total: int  # number of solve rounds attempted
    time_average: float  # average seconds per round
    time_spent: float  # seconds spent in the current round
    hash_rate_perpetual: float  # average hash rate since start ("Perp" in the status line)
    hash_rate: float  # most recent hash rate ("Inst" in the status line)
    difficulty: int  # current registration difficulty
    block_number: int  # block being solved against
    block_hash: str  # hash of that block
+
+
class RegistrationStatisticsLogger:
    """Logs statistics for a registration.

    Renders either as a single in-place rich Status spinner (``output_in_place=True``)
    or as appended console log lines (``output_in_place=False``).
    """

    console: Console
    status: Optional[Status]

    def __init__(
        self, console_: Optional["Console"] = None, output_in_place: bool = True
    ) -> None:
        # Fall back to a fresh rich Console when the caller does not supply one.
        if console_ is None:
            console_ = Console()
        self.console = console_

        # status is None when logging line-by-line; start/stop/update branch on it.
        if output_in_place:
            self.status = self.console.status("Solving")
        else:
            self.status = None

    def start(self) -> None:
        """Start the in-place status display (no-op in line-by-line mode)."""
        if self.status is not None:
            self.status.start()

    def stop(self) -> None:
        """Stop the in-place status display (no-op in line-by-line mode)."""
        if self.status is not None:
            self.status.stop()

    @classmethod
    def get_status_message(
        cls, stats: RegistrationStatistics, verbose: bool = False
    ) -> str:
        """
        Provides a message of the current status of the block solving as a str for a logger or stdout
        """
        # Per-round timing lines are only included in verbose mode.
        message = (
            "Solving\n"
            + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n"
            + (
                f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n"
                + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n"
                if verbose
                else ""
            )
            + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
            + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
            + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
            + f"Block Number: [bold white]{stats.block_number}[/bold white]\n"
            + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
        )
        return message

    def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
        """
        Passes the current status to the logger
        """
        if self.status is not None:
            self.status.update(self.get_status_message(stats, verbose=verbose))
        else:
            self.console.log(self.get_status_message(stats, verbose=verbose))
+
+
class _SolverBase(Process):
    """
    A process that solves the registration PoW problem.

    :param proc_num: The number of the process being created.
    :param num_proc: The total number of processes running.
    :param update_interval: The number of nonces to try to solve before checking for a new block.
    :param finished_queue: The queue to put the process number when a process finishes each update_interval.
            Used for calculating the average time per update_interval across all processes.
    :param solution_queue: The queue to put the solution the process has found during the pow solve.
    :param stop_event: The event to set by the main process when all the solver processes should stop.
            The solver process will check for the event after each update_interval.
            The solver process will stop when the event is set.
            Used to stop the solver processes when a solution is found.
    :param curr_block: The array containing this process's current block hash.
            The main process will set the array to the new block hash when a new block is finalized in the
            network. The solver process will get the new block hash from this array when newBlockEvent is set
    :param curr_block_num: The value containing this process's current block number.
            The main process will set the value to the new block number when a new block is finalized in
            the network. The solver process will get the new block number from this value when
            new_block_event is set.
    :param curr_diff: The array containing this process's current difficulty. The main process will set the array to
            the new difficulty when a new block is finalized in the network. The solver process will get the
            new difficulty from this array when newBlockEvent is set.
    :param check_block: The lock to prevent this process from getting the new block data while the main process is
            updating the data.
    :param limit: The limit of the pow solve for a valid solution.

    :var new_block_event: The event to set by the main process when a new block is finalized in the network.
            The solver process will check for the event after each update_interval.
            The solver process will get the new block hash and difficulty and start solving for a new
            nonce.
    """

    # NOTE(review): Event/Lock/Array/Value below are multiprocessing factory
    # functions rather than classes, so these annotations are informational only.
    proc_num: int
    num_proc: int
    update_interval: int
    finished_queue: Queue_Type
    solution_queue: Queue_Type
    new_block_event: Event
    stop_event: Event
    hotkey_bytes: bytes
    curr_block: Array
    curr_block_num: Value
    curr_diff: Array
    check_block: Lock
    limit: int

    def __init__(
        self,
        proc_num,
        num_proc,
        update_interval,
        finished_queue,
        solution_queue,
        stop_event,
        curr_block,
        curr_block_num,
        curr_diff,
        check_block,
        limit,
    ):
        # daemon=True so solver processes die with the parent instead of hanging.
        Process.__init__(self, daemon=True)
        self.proc_num = proc_num
        self.num_proc = num_proc
        self.update_interval = update_interval
        self.finished_queue = finished_queue
        self.solution_queue = solution_queue
        # Per-process event; the main process sets it on every new block.
        self.new_block_event = Event()
        self.new_block_event.clear()
        self.curr_block = curr_block
        self.curr_block_num = curr_block_num
        self.curr_diff = curr_diff
        self.check_block = check_block
        self.stop_event = stop_event
        self.limit = limit

    def run(self):
        raise NotImplementedError("_SolverBase is an abstract class")

    @staticmethod
    def create_shared_memory() -> tuple[Array, Value, Array]:
        """Creates shared memory for the solver processes to use."""
        curr_block = Array("h", 32, lock=True)  # byte array
        curr_block_num = Value("i", 0, lock=True)  # int
        curr_diff = Array("Q", [0, 0], lock=True)  # [high, low]

        return curr_block, curr_block_num, curr_diff
+
+
class _Solver(_SolverBase):
    """
    Performs POW Solution
    """

    def run(self):
        # NOTE(review): these three are unbound until the first new_block_event;
        # assumes the main process sets the event before meaningful solving starts
        # — confirm against the caller.
        block_number: int
        block_and_hotkey_hash_bytes: bytes
        block_difficulty: int
        nonce_limit = int(math.pow(2, 64)) - 1  # U64 max

        # Start at random nonce
        nonce_start = random.randint(0, nonce_limit)
        nonce_end = nonce_start + self.update_interval
        while not self.stop_event.is_set():
            if self.new_block_event.is_set():
                # Snapshot the new block data under the lock so the main process
                # cannot update it mid-read.
                with self.check_block:
                    block_number = self.curr_block_num.value
                    block_and_hotkey_hash_bytes = bytes(self.curr_block)
                    block_difficulty = _registration_diff_unpack(self.curr_diff)

                self.new_block_event.clear()

            # Do a block of nonces
            solution = _solve_for_nonce_block(
                nonce_start,
                nonce_end,
                block_and_hotkey_hash_bytes,
                block_difficulty,
                self.limit,
                block_number,
            )
            if solution is not None:
                self.solution_queue.put(solution)

            try:
                # Send time
                self.finished_queue.put_nowait(self.proc_num)
            except Full:
                # Stats queue full — dropping the tick is harmless.
                pass

            # Pick a fresh random starting nonce for the next batch.
            nonce_start = random.randint(0, nonce_limit)
            nonce_start = nonce_start % nonce_limit
            nonce_end = nonce_start + self.update_interval
+
+
class _CUDASolver(_SolverBase):
    """
    Performs POW Solution using CUDA
    """

    dev_id: int  # CUDA device id this solver runs on
    tpb: int  # CUDA threads per block

    def __init__(
        self,
        proc_num,
        num_proc,
        update_interval,
        finished_queue,
        solution_queue,
        stop_event,
        curr_block,
        curr_block_num,
        curr_diff,
        check_block,
        limit,
        dev_id: int,
        tpb: int,
    ):
        super().__init__(
            proc_num,
            num_proc,
            update_interval,
            finished_queue,
            solution_queue,
            stop_event,
            curr_block,
            curr_block_num,
            curr_diff,
            check_block,
            limit,
        )
        self.dev_id = dev_id
        self.tpb = tpb

    def run(self):
        # Unlike _Solver, these start with dummy values, so solving before the
        # first new_block_event cannot raise — it just works on placeholder data.
        block_number: int = 0  # dummy value
        block_and_hotkey_hash_bytes: bytes = b"0" * 32  # dummy value
        block_difficulty: int = int(math.pow(2, 64)) - 1  # dummy value
        nonce_limit = int(math.pow(2, 64)) - 1  # U64MAX

        # Start at random nonce
        nonce_start = random.randint(0, nonce_limit)
        while not self.stop_event.is_set():
            if self.new_block_event.is_set():
                # Snapshot the new block data under the lock so the main process
                # cannot update it mid-read.
                with self.check_block:
                    block_number = self.curr_block_num.value
                    block_and_hotkey_hash_bytes = bytes(self.curr_block)
                    block_difficulty = _registration_diff_unpack(self.curr_diff)

                self.new_block_event.clear()

            # Do a block of nonces
            solution = _solve_for_nonce_block_cuda(
                nonce_start,
                self.update_interval,
                block_and_hotkey_hash_bytes,
                block_difficulty,
                self.limit,
                block_number,
                self.dev_id,
                self.tpb,
            )
            if solution is not None:
                self.solution_queue.put(solution)

            try:
                # Signal that a nonce_block was finished using queue
                # send our proc_num
                self.finished_queue.put(self.proc_num)
            except Full:
                pass

            # increase nonce by number of nonces processed
            nonce_start += self.update_interval * self.tpb
            nonce_start = nonce_start % nonce_limit
+
+
class LazyLoadedTorch:
    """Proxy object that defers importing ``torch`` until it is actually used.

    Truthiness reports whether torch is importable; attribute access forwards to
    the real module, or logs an install hint and raises ImportError when absent.
    """

    def __bool__(self):
        return _get_real_torch() is not None

    def __getattr__(self, name):
        module = _get_real_torch()
        if module is None:
            log_no_torch_error()
            raise ImportError("torch not installed")
        return getattr(module, name)
+
+
# Static type checkers see the real torch module; at runtime the heavy import is
# deferred until first attribute access via the LazyLoadedTorch proxy.
if typing.TYPE_CHECKING:
    import torch
else:
    torch = LazyLoadedTorch()
+
+
class MaxSuccessException(Exception):
    """
    Raised when the POW solver has reached the maximum number of successful solutions.
    """
+
+
class MaxAttemptsException(Exception):
    """
    Raised when the POW solver has reached the maximum number of attempts.
    """
+
+
async def is_hotkey_registered(
    subtensor: "AsyncSubtensor", netuid: int, hotkey_ss58: str
) -> bool:
    """Checks to see if the hotkey is registered on a given netuid.

    :param subtensor: initialized AsyncSubtensor object used for the chain query
    :param netuid: the subnet uid to check
    :param hotkey_ss58: the SS58 address of the hotkey

    :return: ``True`` if the hotkey holds a UID on the subnet, else ``False``
    """
    _result = await subtensor.substrate.query(
        module="SubtensorModule",
        storage_function="Uids",
        params=[netuid, hotkey_ss58],
    )
    # A non-None Uids entry means the hotkey holds a UID on this subnet.
    return _result is not None
+
+
async def register_extrinsic(
    subtensor: "AsyncSubtensor",
    wallet: "Wallet",
    netuid: int,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
    prompt: bool = False,
    max_allowed_attempts: int = 3,
    output_in_place: bool = True,
    cuda: bool = False,
    dev_id: typing.Union[list[int], int] = 0,
    tpb: int = 256,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    log_verbose: bool = False,
) -> bool:
    """Registers the wallet to the chain.

    :param subtensor: initialized AsyncSubtensor object to use for chain interactions
    :param wallet: Bittensor wallet object.
    :param netuid: The ``netuid`` of the subnet to register on.
    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns
        `False` if the extrinsic fails to enter the block within the timeout.
    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`,
        or returns `False` if the extrinsic fails to be finalized within the timeout.
    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
    :param max_allowed_attempts: Maximum number of attempts to register the wallet.
    :param output_in_place: Whether the POW solving should be outputted to the console as it goes along.
    :param cuda: If `True`, the wallet should be registered using CUDA device(s).
    :param dev_id: The CUDA device id to use, or a list of device ids.
    :param tpb: The number of threads per block (CUDA).
    :param num_processes: The number of processes to use to register.
    :param update_interval: The number of nonces to solve between updates.
    :param log_verbose: If `True`, the registration process will log more information.

    :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion,
        the response is `True`.
    """

    async def get_neuron_for_pubkey_and_subnet():
        # Resolve the hotkey's UID on this subnet, then fetch its full neuron
        # record over RPC; a null neuron means "not registered".
        uid = await subtensor.substrate.query(
            "SubtensorModule", "Uids", [netuid, wallet.hotkey.ss58_address]
        )
        if uid is None:
            return NeuronInfo.get_null_neuron()

        params = [netuid, uid]
        json_body = await subtensor.substrate.rpc_request(
            method="neuronInfo_getNeuron",
            params=params,
        )

        if not (result := json_body.get("result", None)):
            return NeuronInfo.get_null_neuron()

        return NeuronInfo.from_vec_u8(bytes(result))

    logging.debug("Checking subnet status")
    if not await subtensor.subnet_exists(netuid):
        logging.error(
            f":cross_mark: Failed error: subnet {netuid} does not exist."
        )
        return False

    logging.info(
        f":satellite: Checking Account on subnet {netuid} ..."
    )
    neuron = await get_neuron_for_pubkey_and_subnet()
    if not neuron.is_null:
        # Already registered — nothing to do.
        logging.debug(
            f"Wallet {wallet} is already registered on subnet {neuron.netuid} with uid{neuron.uid}."
        )
        return True

    if prompt:
        if not Confirm.ask(
            f"Continue Registration?\n"
            f"  hotkey ({wallet.hotkey_str}):\t[bold white]{wallet.hotkey.ss58_address}[/bold white]\n"
            f"  coldkey ({wallet.name}):\t[bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n"
            f"  network:\t\t[bold white]{subtensor.network}[/bold white]"
        ):
            return False

    # POW solving requires torch; bail out with a hint if it is not installed.
    if not torch:
        log_no_torch_error()
        return False

    # Attempt rolling registration.
    attempts = 1
    pow_result: Optional[POWSolution]
    while True:
        logging.info(
            f":satellite: Registering... ({attempts}/{max_allowed_attempts})"
        )
        # Solve latest POW.
        if cuda:
            if not torch.cuda.is_available():
                if prompt:
                    logging.info("CUDA is not available.")
                return False
            pow_result = await create_pow(
                subtensor,
                wallet,
                netuid,
                output_in_place,
                cuda=cuda,
                dev_id=dev_id,
                tpb=tpb,
                num_processes=num_processes,
                update_interval=update_interval,
                log_verbose=log_verbose,
            )
        else:
            pow_result = await create_pow(
                subtensor,
                wallet,
                netuid,
                output_in_place,
                cuda=cuda,
                num_processes=num_processes,
                update_interval=update_interval,
                log_verbose=log_verbose,
            )

        # pow failed
        if not pow_result:
            # might be registered already on this subnet
            is_registered = await is_hotkey_registered(
                subtensor, netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
            )
            if is_registered:
                logging.error(
                    f":white_heavy_check_mark: Already registered on netuid: {netuid}"
                )
                return True

        # pow successful, proceed to submit pow to chain for registration
        else:
            logging.info(":satellite: Submitting POW...")
            # check if pow result is still valid
            # NOTE(review): if the POW goes stale before the first submit, the
            # loop body never runs and `success` below is unbound — confirm.
            while not await pow_result.is_stale(subtensor=subtensor):
                call = await subtensor.substrate.compose_call(
                    call_module="SubtensorModule",
                    call_function="register",
                    call_params={
                        "netuid": netuid,
                        "block_number": pow_result.block_number,
                        "nonce": pow_result.nonce,
                        "work": [int(byte_) for byte_ in pow_result.seal],
                        "hotkey": wallet.hotkey.ss58_address,
                        "coldkey": wallet.coldkeypub.ss58_address,
                    },
                )
                extrinsic = await subtensor.substrate.create_signed_extrinsic(
                    call=call, keypair=wallet.hotkey
                )
                response = await subtensor.substrate.submit_extrinsic(
                    extrinsic,
                    wait_for_inclusion=wait_for_inclusion,
                    wait_for_finalization=wait_for_finalization,
                )
                if not wait_for_finalization and not wait_for_inclusion:
                    success, err_msg = True, ""
                else:
                    await response.process_events()
                    success = await response.is_success
                    if not success:
                        success, err_msg = (
                            False,
                            format_error_message(
                                await response.error_message,
                                substrate=subtensor.substrate,
                            ),
                        )
                        # Look error here
                        # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs

                        if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
                            logging.info(
                                f":white_heavy_check_mark: Already Registered on subnet: {netuid}."
                            )
                            return True
                        logging.error(f":cross_mark: Failed: {err_msg}")
                        await asyncio.sleep(0.5)

                # Successful registration, final check for neuron and pubkey
                if success:
                    logging.info(":satellite: Checking Registration status...")
                    is_registered = await is_hotkey_registered(
                        subtensor,
                        netuid=netuid,
                        hotkey_ss58=wallet.hotkey.ss58_address,
                    )
                    if is_registered:
                        logging.success(
                            ":white_heavy_check_mark: Registered"
                        )
                        return True
                    else:
                        # neuron not found, try again
                        logging.error(
                            ":cross_mark: Unknown error. Neuron not found."
                        )
                        continue
            else:
                # Exited loop because pow is no longer valid.
                logging.error("POW is stale.")
                # Try again.
                continue

        if attempts < max_allowed_attempts:
            # Failed registration, retry pow
            attempts += 1
            logging.error(
                f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
            )
        else:
            # Failed to register after max attempts.
            logging.error("No more attempts.")
            return False
+
+
async def run_faucet_extrinsic(
    subtensor: "AsyncSubtensor",
    wallet: Wallet,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
    prompt: bool = False,
    max_allowed_attempts: int = 3,
    output_in_place: bool = True,
    cuda: bool = False,
    dev_id: int = 0,
    tpb: int = 256,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    log_verbose: bool = False,
    max_successes: int = 3,
) -> tuple[bool, str]:
    r"""Runs a continual POW to get a faucet of TAO on the test net.

    :param subtensor: The subtensor interface object used to run the extrinsic
    :param wallet: Bittensor wallet object.
    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`,
        or returns `False` if the extrinsic fails to enter the block within the timeout.
    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`,
        or returns `False` if the extrinsic fails to be finalized within the timeout.
    :param max_allowed_attempts: Maximum number of attempts to register the wallet.
    :param output_in_place: Whether to output logging data as the process runs.
    :param cuda: If `True`, the wallet should be registered using CUDA device(s).
    :param dev_id: The CUDA device id to use
    :param tpb: The number of threads per block (CUDA).
    :param num_processes: The number of processes to use to register.
    :param update_interval: The number of nonces to solve between updates.
    :param log_verbose: If `True`, the registration process will log more information.
    :param max_successes: The maximum number of successful faucet runs for the wallet.

    :return: `True` if extrinsic was finalized or included in the block. If we did not wait for
        finalization/inclusion, the response is also `True`
    """
    if prompt:
        if not Confirm.ask(
            "Run Faucet?\n"
            f" wallet name: [bold white]{wallet.name}[/bold white]\n"
            f" coldkey:     [bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n"
            f" network:     [bold white]{subtensor}[/bold white]"
        ):
            return False, ""

    # The POW solver requires torch.
    if not torch:
        log_no_torch_error()
        return False, "Requires torch"

    # Unlock coldkey
    try:
        wallet.unlock_coldkey()
    except KeyFileError:
        return False, "There was an error unlocking your coldkey"

    # Get previous balance.
    old_balance = await subtensor.get_balance(wallet.coldkeypub.ss58_address)

    # Attempt rolling registration.
    attempts = 1
    successes = 1
    while True:
        try:
            # (Re)solve a POW until we hold one that is still fresh.
            pow_result = None
            while pow_result is None or await pow_result.is_stale(subtensor=subtensor):
                # Solve latest POW.
                if cuda:
                    if not torch.cuda.is_available():
                        if prompt:
                            logging.error("CUDA is not available.")
                        return False, "CUDA is not available."
                    pow_result: Optional[POWSolution] = await create_pow(
                        subtensor,
                        wallet,
                        -1,  # -1 targets the faucet rather than a subnet
                        output_in_place,
                        cuda=cuda,
                        dev_id=dev_id,
                        tpb=tpb,
                        num_processes=num_processes,
                        update_interval=update_interval,
                        log_verbose=log_verbose,
                    )
                else:
                    pow_result: Optional[POWSolution] = await create_pow(
                        subtensor,
                        wallet,
                        -1,
                        output_in_place,
                        cuda=cuda,
                        num_processes=num_processes,
                        update_interval=update_interval,
                        log_verbose=log_verbose,
                    )
            call = await subtensor.substrate.compose_call(
                call_module="SubtensorModule",
                call_function="faucet",
                call_params={
                    "block_number": pow_result.block_number,
                    "nonce": pow_result.nonce,
                    "work": [int(byte_) for byte_ in pow_result.seal],
                },
            )
            extrinsic = await subtensor.substrate.create_signed_extrinsic(
                call=call, keypair=wallet.coldkey
            )
            response = await subtensor.substrate.submit_extrinsic(
                extrinsic,
                wait_for_inclusion=wait_for_inclusion,
                wait_for_finalization=wait_for_finalization,
            )

            # process if registration successful, try again if pow is still valid
            await response.process_events()
            if not await response.is_success:
                logging.error(
                    f":cross_mark: Failed: {format_error_message(await response.error_message, subtensor.substrate)}"
                )
                if attempts == max_allowed_attempts:
                    raise MaxAttemptsException
                attempts += 1
                # Wait a bit before trying again. Must be awaited: a blocking
                # time.sleep() here would stall the whole event loop.
                await asyncio.sleep(1)

            # Successful registration
            else:
                new_balance = await subtensor.get_balance(
                    wallet.coldkeypub.ss58_address
                )
                logging.info(
                    f"Balance: {old_balance[wallet.coldkeypub.ss58_address]} :arrow_right: {new_balance[wallet.coldkeypub.ss58_address]}"
                )
                old_balance = new_balance

                if successes == max_successes:
                    raise MaxSuccessException

                attempts = 1  # Reset attempts on success
                successes += 1

        except KeyboardInterrupt:
            return True, "Done"

        except MaxSuccessException:
            # Report the actual configured limit, not a hardcoded 3.
            return True, f"Max successes reached: {max_successes}"

        except MaxAttemptsException:
            return False, f"Max attempts reached: {max_allowed_attempts}"
+
+
+async def _check_for_newest_block_and_update(
+ subtensor: "AsyncSubtensor",
+ netuid: int,
+ old_block_number: int,
+ hotkey_bytes: bytes,
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ update_curr_block: typing.Callable,
+ check_block: Lock,
+ solvers: list[_Solver],
+ curr_stats: RegistrationStatistics,
+) -> int:
+ """
+ Checks for a new block and updates the current block information if a new block is found.
+
+ :param subtensor: The subtensor object to use for getting the current block.
+ :param netuid: The netuid to use for retrieving the difficulty.
+ :param old_block_number: The old block number to check against.
+ :param hotkey_bytes: The bytes of the hotkey's pubkey.
+ :param curr_diff: The current difficulty as a multiprocessing array.
+ :param curr_block: Where the current block is stored as a multiprocessing array.
+ :param curr_block_num: Where the current block number is stored as a multiprocessing value.
+ :param update_curr_block: A function that updates the current block.
+ :param check_block: A mp lock that is used to check for a new block.
+ :param solvers: A list of solvers to update the current block for.
+ :param curr_stats: The current registration statistics to update.
+
+ :return: The current block number.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ if block_number != old_block_number:
+ old_block_number = block_number
+ # update block information
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+ block_bytes = bytes.fromhex(block_hash[2:])
+
+ update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+ # Set new block events for each solver
+
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ # update stats
+ curr_stats.block_number = block_number
+ curr_stats.block_hash = block_hash
+ curr_stats.difficulty = difficulty
+
+ return old_block_number
+
+
+async def _block_solver(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ num_processes: int,
+ netuid: int,
+ dev_id: list[int],
+ tpb: int,
+ update_interval: int,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ n_samples,
+ alpha_,
+ output_in_place,
+ log_verbose,
+ cuda: bool,
+):
+ """
+ Shared code used by the Solvers to solve the POW solution
+ """
+ limit = int(math.pow(2, 256)) - 1
+
+ # Establish communication queues
+ ## See the _Solver class for more information on the queues.
+ stop_event = Event()
+ stop_event.clear()
+
+ solution_queue = Queue()
+ finished_queues = [Queue() for _ in range(num_processes)]
+ check_block = Lock()
+
+ hotkey_bytes = (
+ wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
+ )
+
+ if cuda:
+ ## Create a worker per CUDA device
+ num_processes = len(dev_id)
+ solvers = [
+ _CUDASolver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id[i],
+ tpb,
+ )
+ for i in range(num_processes)
+ ]
+ else:
+ # Start consumers
+ solvers = [
+ _Solver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ for i in range(num_processes)
+ ]
+
+ # Get first block
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+
+ block_bytes = bytes.fromhex(block_hash[2:])
+ old_block_number = block_number
+ # Set to current block
+ _update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total=0.0,
+ time_average=0.0,
+ rounds_total=0,
+ time_spent=0.0,
+ hash_rate_perpetual=0.0,
+ hash_rate=0.0,
+ difficulty=difficulty,
+ block_number=block_number,
+ block_hash=block_hash,
+ )
+
+ start_time_perpetual = time.time()
+
+ logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
+ logger.start()
+
+ solution = None
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha
+
+ timeout = 0.15 if cuda else 0.15
+ while netuid == -1 or not await is_hotkey_registered(
+ subtensor, netuid, wallet.hotkey.ss58_address
+ ):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=timeout)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = await _check_for_newest_block_and_update(
+ subtensor=subtensor,
+ netuid=netuid,
+ hotkey_bytes=hotkey_bytes,
+ old_block_number=old_block_number,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_stats=curr_stats,
+ update_curr_block=_update_curr_block,
+ check_block=check_block,
+ solvers=solvers,
+ )
+
+ num_time = 0
+ for finished_queue in finished_queues:
+ try:
+ finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ if cuda:
+ hash_rate_ = (num_time * tpb * update_interval) / time_since_last
+ else:
+ hash_rate_ = (num_time * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum(
+ [hash_rates[i] * weights[i] for i in range(n_samples)]
+ ) / (sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (
+ curr_stats.time_average * curr_stats.rounds_total
+ + curr_stats.time_spent
+ ) / (curr_stats.rounds_total + num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ if cuda:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * (tpb * update_interval)
+ ) / new_time_spent_total
+ else:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * update_interval
+ ) / new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while, solution contains the nonce or wallet is registered
+ stop_event.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ _terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+async def _solve_for_difficulty_fast_cuda(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ update_interval: int = 50_000,
+ tpb: int = 512,
+ dev_id: typing.Union[list[int], int] = 0,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Solves the registration fast using CUDA
+
+ :param subtensor: The subtensor node to grab blocks
+ :param wallet: The wallet to register
+ :param netuid: The netuid of the subnet to register to.
+ :param output_in_place: If true, prints the output in place, otherwise prints to new lines
+ :param update_interval: The number of nonces to try before checking for more blocks
+ :param tpb: The number of threads per block. CUDA param that should match the GPU capability
+ :param dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices
+ :param n_samples: The number of samples of the hash_rate to keep for the EWMA
+ :param alpha_: The alpha for the EWMA for the hash_rate calculation
+ :param log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more
+ robust.
+ """
+ if isinstance(dev_id, int):
+ dev_id = [dev_id]
+ elif dev_id is None:
+ dev_id = [0]
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ if not torch.cuda.is_available():
+ raise Exception("CUDA not available")
+
+ # Set mp start to use spawn so CUDA doesn't complain
+ with _UsingSpawnStartMethod(force=True):
+ curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=None,
+ netuid=netuid,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=True,
+ )
+
+ return solution
+
+
+async def _solve_for_difficulty_fast(
+ subtensor,
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Solves the POW for registration using multiprocessing.
+
+ :param subtensor: Subtensor to connect to for block information and to submit.
+ :param wallet: wallet to use for registration.
+ :param netuid: The netuid of the subnet to register to.
+ :param output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line.
+ :param num_processes: Number of processes to use.
+ :param update_interval: Number of nonces to solve before updating block information.
+ :param n_samples: The number of samples of the hash_rate to keep for the EWMA
+ :param alpha_: The alpha for the EWMA for the hash_rate calculation
+ :param log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Notes:
+
+ - The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ - We can also modify the update interval to do smaller blocks of work, while still updating the block information
+ after a different number of nonces, to increase the transparency of the process while still keeping the speed.
+ """
+ if not num_processes:
+ # get the number of allowed processes for this process
+ num_processes = min(1, get_cpu_count())
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=num_processes,
+ netuid=netuid,
+ dev_id=None,
+ tpb=None,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=False,
+ )
+
+ return solution
+
+
+def _terminate_workers_and_wait_for_exit(
+ workers: list[typing.Union[Process, Queue_Type]],
+) -> None:
+ for worker in workers:
+ if isinstance(worker, Queue_Type):
+ worker.join_thread()
+ else:
+ try:
+ worker.join(3.0)
+ except subprocess.TimeoutExpired:
+ worker.terminate()
+ try:
+ worker.close()
+ except ValueError:
+ worker.terminate()
+
+
+# TODO verify this works with async
+@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3)
+async def _get_block_with_retry(
+ subtensor: "AsyncSubtensor", netuid: int
+) -> tuple[int, int, bytes]:
+ """
+ Gets the current block number, difficulty, and block hash from the substrate node.
+
+ :param subtensor: The subtensor object to use to get the block number, difficulty, and block hash.
+ :param netuid: The netuid of the network to get the block number, difficulty, and block hash from.
+
+ :return: The current block number, difficulty of the subnet, block hash
+
+ :raises Exception: If the block hash is None.
+ :raises ValueError: If the difficulty is None.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ block_hash = await subtensor.substrate.get_block_hash(
+ block_number
+ ) # TODO check if I need to do all this
+ try:
+ difficulty = (
+ 1_000_000
+ if netuid == -1
+ else int(
+ await subtensor.get_hyperparameter(
+ param_name="Difficulty", netuid=netuid, block_hash=block_hash
+ )
+ )
+ )
+ except TypeError:
+ raise ValueError("Chain error. Difficulty is None")
+ except SubstrateRequestException:
+ raise Exception(
+ "Network error. Could not connect to substrate to get block hash"
+ )
+ return block_number, difficulty, block_hash
+
+
+def _registration_diff_unpack(packed_diff: Array) -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def _registration_diff_pack(diff: int, packed_diff: Array):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
+
+
+class _UsingSpawnStartMethod:
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = mp.get_start_method(allow_none=True)
+ if self._old_start_method is None:
+ self._old_start_method = "spawn" # default to spawn
+
+ mp.set_start_method("spawn", force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ mp.set_start_method(self._old_start_method, force=True)
+
+
+async def create_pow(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: typing.Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: int = None,
+ update_interval: int = None,
+ log_verbose: bool = False,
+) -> Optional[dict[str, typing.Any]]:
+ """
+ Creates a proof of work for the given subtensor and wallet.
+
+ :param subtensor: The subtensor to create a proof of work for.
+ :param wallet: The wallet to create a proof of work for.
+ :param netuid: The netuid for the subnet to create a proof of work for.
+ :param output_in_place: If true, prints the progress of the proof of work to the console
+ in-place. Meaning the progress is printed on the same lines.
+ :param cuda: If true, uses CUDA to solve the proof of work.
+ :param dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list,
+ then multiple CUDA devices will be used to solve the proof of work.
+ :param tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32.
+ :param num_processes: The number of processes to use when solving the proof of work.
+ If None, then the number of processes is equal to the number of CPU cores.
+ :param update_interval: The number of nonces to run before checking for a new block.
+ :param log_verbose: If true, prints the progress of the proof of work more verbosely.
+
+ :return: The proof of work solution or None if the wallet is already registered or there is a different error.
+
+ :raises ValueError: If the subnet does not exist.
+ """
+ if netuid != -1:
+ if not await subtensor.subnet_exists(netuid=netuid):
+ raise ValueError(f"Subnet {netuid} does not exist")
+
+ if cuda:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast_cuda(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ return solution
+
+
+def _solve_for_nonce_block_cuda(
+ nonce_start: int,
+ update_interval: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+ dev_id: int,
+ tpb: int,
+) -> Optional[POWSolution]:
+ """
+ Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb
+ """
+ solution, seal = solve_cuda(
+ nonce_start,
+ update_interval,
+ tpb,
+ block_and_hotkey_hash_bytes,
+ difficulty,
+ limit,
+ dev_id,
+ )
+
+ if solution != -1:
+ # Check if solution is valid (i.e. not -1)
+ return POWSolution(solution, block_number, difficulty, seal)
+
+ return None
+
+
+def _solve_for_nonce_block(
+ nonce_start: int,
+ nonce_end: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+) -> Optional[POWSolution]:
+ """
+ Tries to solve the POW for a block of nonces (nonce_start, nonce_end)
+ """
+ for nonce in range(nonce_start, nonce_end):
+ # Create seal.
+ seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce)
+
+ # Check if seal meets difficulty
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ # Found a solution, save it.
+ return POWSolution(nonce, block_number, difficulty, seal)
+
+ return None
+
+
+class CUDAException(Exception):
+    """An exception raised when an error occurs in the CUDA environment."""
+    # NOTE(review): not raised anywhere in this module — presumably raised by
+    # CUDA helpers elsewhere; confirm before removing.
+
+
+def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
+
+
+def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes:
+ """
+ Create a cryptographic seal hash from the given block and hotkey hash bytes and nonce.
+
+ This function generates a seal hash by combining the given block and hotkey hash bytes with a nonce.
+ It first converts the nonce to a byte representation, then concatenates it with the first 64 hex
+ characters of the block and hotkey hash bytes. The result is then hashed using SHA-256 followed by
+ the Keccak-256 algorithm to produce the final seal hash.
+
+ :param block_and_hotkey_hash_bytes: The combined hash bytes of the block and hotkey.
+ :param nonce: The nonce value used for hashing.
+
+ :return: The resulting seal hash.
+ """
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update(seal_sh256).digest()
+ return seal
+
+
+def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int) -> bool:
+ """Determines if a seal meets the specified difficulty"""
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ return product < limit
+
+
+def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
+ """Hashes the block with the hotkey using Keccak-256 to get 32 bytes"""
+ kec = keccak.new(digest_bits=256)
+ kec = kec.update(bytearray(block_bytes + hotkey_bytes))
+ block_and_hotkey_hash_bytes = kec.digest()
+ return block_and_hotkey_hash_bytes
+
+
+def _update_curr_block(
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ block_number: int,
+ block_bytes: bytes,
+ diff: int,
+ hotkey_bytes: bytes,
+ lock: Lock,
+):
+ """
+ Update the current block data with the provided block information and difficulty.
+
+ This function updates the current block and its difficulty in a thread-safe manner. It sets the current block
+ number, hashes the block with the hotkey, updates the current block bytes, and packs the difficulty.
+
+ :param curr_diff: Shared array to store the current difficulty.
+ :param curr_block: Shared array to store the current block data.
+ :param curr_block_num: Shared value to store the current block number.
+ :param block_number: The block number to set as the current block number.
+ :param block_bytes: The block data bytes to be hashed with the hotkey.
+ :param diff: The difficulty value to be packed into the current difficulty array.
+ :param hotkey_bytes: The hotkey bytes used for hashing the block.
+ :param lock: A lock to ensure thread-safe updates.
+ """
+ with lock:
+ curr_block_num.value = block_number
+ # Hash the block with the hotkey
+ block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
+ for i in range(32):
+ curr_block[i] = block_and_hotkey_hash_bytes[i]
+ _registration_diff_pack(diff, curr_diff)
+
+
+def get_cpu_count() -> int:
+ try:
+ return len(os.sched_getaffinity(0))
+ except AttributeError:
+ # macOS does not have sched_getaffinity
+ return os.cpu_count()
+
+
+@dataclass
+class RegistrationStatistics:
+    """Statistics for a registration."""
+
+    time_spent_total: float  # seconds elapsed since solving started
+    rounds_total: int  # total number of finished work rounds across all workers
+    time_average: float  # running average of seconds spent per round
+    time_spent: float  # seconds spent on the most recent round(s)
+    hash_rate_perpetual: float  # average hash rate over the whole run
+    hash_rate: float  # exponentially weighted moving average hash rate
+    difficulty: int  # current POW difficulty
+    block_number: int  # current chain block number
+    # NOTE(review): annotated as bytes, but the solver loop assigns the
+    # substrate block hash, which is sliced like a "0x…" hex string — confirm.
+    block_hash: bytes
+
+
+def solve_cuda(
+    nonce_start: np.int64,
+    update_interval: np.int64,
+    tpb: int,
+    block_and_hotkey_hash_bytes: bytes,
+    difficulty: int,
+    limit: int,
+    dev_id: int = 0,
+) -> tuple[np.int64, bytes]:
+    """
+    Solves the PoW problem using CUDA.
+
+    :param nonce_start: Starting nonce.
+    :param update_interval: Number of nonces to solve before updating block information.
+    :param tpb: Threads per block.
+    :param block_and_hotkey_hash_bytes: Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes.
+    :param difficulty: Difficulty of the PoW problem.
+    :param limit: Upper limit of the nonce.
+    :param dev_id: The CUDA device ID
+
+    :return: (nonce, seal) corresponding to the solution. Returns -1 for nonce if no solution is found.
+    """
+
+    # cubit is an optional GPU dependency; imported lazily so CPU-only
+    # installs do not pay for it.
+    try:
+        import cubit
+    except ImportError:
+        raise ImportError("Please install cubit")
+
+    # Largest seal value that still satisfies the difficulty
+    # (seal * difficulty < limit, see _seal_meets_difficulty).
+    upper = int(limit // difficulty)
+
+    upper_bytes = upper.to_bytes(32, byteorder="little", signed=False)
+
+    # Call cython function
+    # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit,
+    # const unsigned char[:] block_bytes, int dev_id
+    block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+
+    solution = cubit.solve_cuda(
+        tpb,
+        nonce_start,
+        update_interval,
+        upper_bytes,
+        block_and_hotkey_hash_hex,
+        dev_id,
+    )  # 0 is first GPU
+    seal = None
+    if solution != -1:
+        # NOTE(review): this passes the already-hexlified digest, which
+        # _create_seal_hash hexlifies again — unlike the CPU path, which
+        # passes raw hash bytes. Confirm this matches the cubit kernel's
+        # preimage construction.
+        seal = _create_seal_hash(block_and_hotkey_hash_hex, solution)
+        if _seal_meets_difficulty(seal, difficulty, limit):
+            return solution, seal
+        else:
+            # Kernel reported a nonce whose recomputed seal fails the check.
+            return -1, b"\x00" * 32
+
+    return solution, seal
+
+
+def reset_cuda():
+ """
+ Resets the CUDA environment.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ cubit.reset_cuda()
+
+
+def log_cuda_errors() -> str:
+ """
+ Logs any CUDA errors.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ f = io.StringIO()
+ with redirect_stdout(f):
+ cubit.log_cuda_errors()
+
+ s = f.getvalue()
+
+ return s
diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py
new file mode 100644
index 0000000000..9e73f98a30
--- /dev/null
+++ b/bittensor/core/extrinsics/async_root.py
@@ -0,0 +1,245 @@
+import asyncio
+import time
+from typing import Union, TYPE_CHECKING
+
+import numpy as np
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from numpy.typing import NDArray
+from rich.prompt import Confirm
+from rich.table import Table, Column
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.utils import u16_normalized_float, format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.weight_utils import (
+ normalize_max_weight,
+ convert_weights_and_uids_for_emit,
+)
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]:
+ # Get weight restrictions.
+ maw, mwl = await asyncio.gather(
+ subtensor.get_hyperparameter("MinAllowedWeights", netuid=0),
+ subtensor.get_hyperparameter("MaxWeightsLimit", netuid=0),
+ )
+ min_allowed_weights = int(maw)
+ max_weight_limit = u16_normalized_float(int(mwl))
+ return min_allowed_weights, max_weight_limit
+
+
+async def root_register_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = True,
+ prompt: bool = False,
+) -> bool:
+ """Registers the wallet to root network.
+
+ :param subtensor: The AsyncSubtensor object
+ :param wallet: Bittensor wallet object.
+ :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
+
+ :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ try:
+ wallet.unlock_coldkey()
+ except KeyFileError:
+ logging.error("Error decrypting coldkey (possibly incorrect password)")
+ return False
+
+ logging.debug(
+ f"Checking if hotkey ({wallet.hotkey_str}) is registered on root."
+ )
+ is_registered = await subtensor.is_hotkey_registered(
+ netuid=0, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.error(
+ ":white_heavy_check_mark: Already registered on root network."
+ )
+ return True
+
+ logging.info(":satellite: Registering to root network...")
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ success, err_msg = await subtensor.sign_and_send_extrinsic(
+ call,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ time.sleep(0.5)
+ return False
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ uid = await subtensor.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[0, wallet.hotkey.ss58_address],
+ )
+ if uid is not None:
+ logging.info(
+ f":white_heavy_check_mark: Registered with UID {uid}"
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+ return False
+
+
+async def set_root_weights_extrinsic(
+    subtensor: "AsyncSubtensor",
+    wallet: Wallet,
+    netuids: Union[NDArray[np.int64], list[int]],
+    weights: Union[NDArray[np.float32], list[float]],
+    version_key: int = 0,
+    wait_for_inclusion: bool = False,
+    wait_for_finalization: bool = False,
+    prompt: bool = False,
+) -> bool:
+    """Sets the given weights and values on chain for wallet hotkey account.
+
+    :param subtensor: The AsyncSubtensor object
+    :param wallet: Bittensor wallet object.
+    :param netuids: The `netuid` of the subnet to set weights for.
+    :param weights: Weights to set. These must be `float` s and must correspond to the passed `netuid` s.
+    :param version_key: The version key of the validator.
+    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns
+        `False` if the extrinsic fails to enter the block within the timeout.
+    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`,
+        or returns `False` if the extrinsic fails to be finalized within the timeout.
+    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
+    :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion,
+        the response is `True`.
+    """
+
+    async def _do_set_weights():
+        # Composes, signs, and submits the set_root_weights extrinsic.
+        # NOTE: closes over ``weight_uids`` / ``weight_vals``, which are
+        # assigned just before this helper is called at the bottom.
+        call = await subtensor.substrate.compose_call(
+            call_module="SubtensorModule",
+            call_function="set_root_weights",
+            call_params={
+                "dests": weight_uids,
+                "weights": weight_vals,
+                "netuid": 0,
+                "version_key": version_key,
+                "hotkey": wallet.hotkey.ss58_address,
+            },
+        )
+        # Period dictates how long the extrinsic will stay as part of waiting pool
+        extrinsic = await subtensor.substrate.create_signed_extrinsic(
+            call=call,
+            keypair=wallet.coldkey,
+            era={"period": 5},
+        )
+        response = await subtensor.substrate.submit_extrinsic(
+            extrinsic,
+            wait_for_inclusion=wait_for_inclusion,
+            wait_for_finalization=wait_for_finalization,
+        )
+        # We only wait here if we expect finalization.
+        if not wait_for_finalization and not wait_for_inclusion:
+            return True, "Not waiting for finalization or inclusion."
+
+        await response.process_events()
+        if await response.is_success:
+            return True, "Successfully set weights."
+        else:
+            return False, await response.error_message
+
+    # The hotkey must already have a UID on the root network (netuid 0).
+    my_uid = await subtensor.substrate.query(
+        "SubtensorModule", "Uids", [0, wallet.hotkey.ss58_address]
+    )
+
+    if my_uid is None:
+        logging.error("Your hotkey is not registered to the root network")
+        return False
+
+    try:
+        wallet.unlock_coldkey()
+    except KeyFileError:
+        logging.error("Error decrypting coldkey (possibly incorrect password)")
+        return False
+
+    # First convert types.
+    if isinstance(netuids, list):
+        netuids = np.array(netuids, dtype=np.int64)
+    if isinstance(weights, list):
+        weights = np.array(weights, dtype=np.float32)
+
+    logging.debug("Fetching weight limits")
+    min_allowed_weights, max_weight_limit = await get_limits(subtensor)
+
+    # Get non zero values.
+    non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1)
+    non_zero_weights = weights[non_zero_weight_idx]
+    if non_zero_weights.size < min_allowed_weights:
+        raise ValueError(
+            "The minimum number of weights required to set weights is {}, got {}".format(
+                min_allowed_weights, non_zero_weights.size
+            )
+        )
+
+    # Normalize the weights to max value.
+    logging.info("Normalizing weights")
+    formatted_weights = normalize_max_weight(x=weights, limit=max_weight_limit)
+    logging.info(
+        f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}"
+    )
+
+    # Ask before moving on.
+    # NOTE(review): the rich Table below is built and populated but never
+    # rendered — only the plain print() output is shown. Confirm whether a
+    # console print of the table was intended.
+    # NOTE(review): Confirm.ask is a synchronous prompt and blocks the event
+    # loop while waiting for user input.
+    if prompt:
+        table = Table(
+            Column("[dark_orange]Netuid", justify="center", style="bold green"),
+            Column(
+                "[dark_orange]Weight", justify="center", style="bold light_goldenrod2"
+            ),
+            expand=False,
+            show_edge=False,
+        )
+        print("Netuid | Weight")
+
+        for netuid, weight in zip(netuids, formatted_weights):
+            table.add_row(str(netuid), f"{weight:.8f}")
+            print(f"{netuid} | {weight}")
+
+        if not Confirm.ask("\nDo you want to set these root weights?"):
+            return False
+
+    try:
+        logging.info(":satellite: Setting root weights...")
+        # NOTE: submits the ORIGINAL (pre-normalization) weights; the
+        # normalized values above are only displayed to the user.
+        weight_uids, weight_vals = convert_weights_and_uids_for_emit(netuids, weights)
+
+        success, error_message = await _do_set_weights()
+
+        if not wait_for_finalization and not wait_for_inclusion:
+            return True
+
+        if success is True:
+            logging.info(":white_heavy_check_mark: Finalized")
+            return True
+        else:
+            fmt_err = format_error_message(error_message, subtensor.substrate)
+            logging.error(f":cross_mark: Failed: {fmt_err}")
+            return False
+
+    except SubstrateRequestException as e:
+        fmt_err = format_error_message(e, subtensor.substrate)
+        logging.error(f":cross_mark: Failed: error:{fmt_err}")
+        return False
diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py
new file mode 100644
index 0000000000..b9072ae9b8
--- /dev/null
+++ b/bittensor/core/extrinsics/async_transfer.py
@@ -0,0 +1,200 @@
+import asyncio
+from typing import TYPE_CHECKING
+
+from bittensor_wallet import Wallet
+from bittensor_wallet.errors import KeyFileError
+from rich.prompt import Confirm
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.settings import NETWORK_EXPLORER_MAP
+from bittensor.utils import (
+ format_error_message,
+ get_explorer_url_for_network,
+ is_valid_bittensor_address_or_public_key,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def transfer_extrinsic(
+    subtensor: "AsyncSubtensor",
+    wallet: Wallet,
+    destination: str,
+    amount: Balance,
+    transfer_all: bool = False,
+    wait_for_inclusion: bool = True,
+    wait_for_finalization: bool = False,
+    keep_alive: bool = True,
+    prompt: bool = False,
+) -> bool:
+    """Transfers funds from this wallet to the destination public key address.
+
+    :param subtensor: initialized AsyncSubtensor object used for transfer
+    :param wallet: Bittensor wallet object to make transfer from.
+    :param destination: Destination public key address (ss58_address or ed25519) of recipient.
+    :param amount: Amount to stake as Bittensor balance.
+    :param transfer_all: Whether to transfer all funds from this wallet to the destination address.
+    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`,
+        or returns `False` if the extrinsic fails to enter the block within the timeout.
+    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning
+        `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+    :param keep_alive: If set, keeps the account alive by keeping the balance above the existential deposit.
+    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
+    :return: success: Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for
+        finalization / inclusion, the response is `True`, regardless of its inclusion.
+    """
+
+    async def get_transfer_fee() -> Balance:
+        """
+        Calculates the transaction fee for transferring tokens from a wallet to a specified destination address.
+        This function simulates the transfer to estimate the associated cost, taking in account the current
+        network conditions and transaction complexity.
+        """
+        call = await subtensor.substrate.compose_call(
+            call_module="Balances",
+            call_function="transfer_allow_death",
+            call_params={"dest": destination, "value": amount.rao},
+        )
+
+        try:
+            payment_info = await subtensor.substrate.get_payment_info(
+                call=call, keypair=wallet.coldkeypub
+            )
+        except SubstrateRequestException as e:
+            payment_info = {"partialFee": int(2e7)}  # assume 0.02 Tao
+            logging.error(f":cross_mark: Failed to get payment info:")
+            logging.error(f"\t\t{format_error_message(e, subtensor.substrate)}")
+            logging.error(
+                f"\t\tDefaulting to default transfer fee: {payment_info['partialFee']}"
+            )
+
+        return Balance.from_rao(payment_info["partialFee"])
+
+    async def do_transfer() -> tuple[bool, str, str]:
+        """
+        Makes transfer from wallet to destination public key address.
+        :return: success, block hash, formatted error message
+        """
+        call = await subtensor.substrate.compose_call(
+            call_module="Balances",
+            call_function="transfer_allow_death",
+            call_params={"dest": destination, "value": amount.rao},
+        )
+        extrinsic = await subtensor.substrate.create_signed_extrinsic(
+            call=call, keypair=wallet.coldkey
+        )
+        response = await subtensor.substrate.submit_extrinsic(
+            extrinsic,
+            wait_for_inclusion=wait_for_inclusion,
+            wait_for_finalization=wait_for_finalization,
+        )
+        # We only wait here if we expect finalization.
+        if not wait_for_finalization and not wait_for_inclusion:
+            return True, "", ""
+
+        # Otherwise continue with finalization.
+        await response.process_events()
+        if await response.is_success:
+            block_hash_ = response.block_hash
+            return True, block_hash_, ""
+        else:
+            return (
+                False,
+                "",
+                format_error_message(
+                    await response.error_message, substrate=subtensor.substrate
+                ),
+            )
+
+    # Validate destination address.
+    if not is_valid_bittensor_address_or_public_key(destination):
+        logging.error(
+            f":cross_mark: Invalid destination SS58 address:[bold white]\n {destination}[/bold white]"
+        )
+        return False
+    logging.info(f"Initiating transfer on network: {subtensor.network}")
+    # Unlock wallet coldkey.
+    try:
+        wallet.unlock_coldkey()
+    except KeyFileError:
+        logging.error("Error decrypting coldkey (possibly incorrect password)")
+        return False
+
+    # Check balance.
+    logging.info(
+        f":satellite: Checking balance and fees on chain {subtensor.network}"
+    )
+    # check existential deposit and fee
+    logging.debug("Fetching existential and fee")
+    block_hash = await subtensor.substrate.get_chain_head()
+    account_balance_, existential_deposit = await asyncio.gather(
+        subtensor.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash),
+        subtensor.get_existential_deposit(block_hash=block_hash),
+    )
+    account_balance = account_balance_[wallet.coldkeypub.ss58_address]
+    fee = await get_transfer_fee()
+
+    if not keep_alive:
+        # Check if the transfer should keep_alive the account
+        existential_deposit = Balance(0)
+
+    # Check if we have enough balance.
+    if transfer_all is True:
+        amount = account_balance - fee - existential_deposit
+        if amount < Balance(0):
+            logging.error("Not enough balance to transfer")
+            return False
+
+    if account_balance < (amount + fee + existential_deposit):
+        logging.error(":cross_mark: Not enough balance")
+        logging.error(f"\t\tBalance:\t{account_balance}")
+        logging.error(f"\t\tAmount:\t{amount}")
+        logging.error(f"\t\tFor fee:\t{fee}")
+        return False
+
+    # Ask before moving on.
+    if prompt:
+        if not Confirm.ask(
+            "Do you want to transfer:[bold white]\n"
+            f"  amount: [bright_cyan]{amount}[/bright_cyan]\n"
+            f"  from: [light_goldenrod2]{wallet.name}[/light_goldenrod2] : [bright_magenta]{wallet.coldkey.ss58_address}\n[/bright_magenta]"
+            f"  to: [bright_magenta]{destination}[/bright_magenta]\n  for fee: [bright_cyan]{fee}[/bright_cyan]"
+        ):
+            return False
+
+    logging.info(":satellite: Transferring...")
+    # Fix: actually submit the transfer. Previously `do_transfer` was defined but
+    # never awaited, so `success` and `err_msg` below were undefined (NameError at
+    # runtime) and the `else:` branch mis-paired with the `finney` network check.
+    success, block_hash, err_msg = await do_transfer()
+
+    if success:
+        # `block_hash` is now the inclusion block (empty string when we did not
+        # wait for inclusion/finalization).
+        logging.info(f"[green]Block Hash: {block_hash}")
+
+        if subtensor.network == "finney":
+            logging.debug("Fetching explorer URLs")
+            explorer_urls = get_explorer_url_for_network(
+                subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+            )
+            if explorer_urls != {} and explorer_urls:
+                logging.info(
+                    f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}"
+                )
+                logging.info(
+                    f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}"
+                )
+    else:
+        logging.error(f":cross_mark: Failed: {err_msg}")
+
+    if success:
+        logging.info(":satellite: Checking Balance...")
+        new_balance = await subtensor.get_balance(
+            wallet.coldkeypub.ss58_address, reuse_block=False
+        )
+        logging.info(
+            f"Balance: [blue]{account_balance} :arrow_right: [green]{new_balance[wallet.coldkeypub.ss58_address]}"
+        )
+        return True
+
+    return False
diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py
index 5e9f2e9e19..3dcfd5b2c2 100644
--- a/bittensor/core/extrinsics/commit_weights.py
+++ b/bittensor/core/extrinsics/commit_weights.py
@@ -139,7 +139,9 @@ def commit_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to commit weights: {error_message}")
return False, error_message
@@ -269,6 +271,8 @@ def reveal_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to reveal weights: {error_message}")
return False, error_message
diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py
index 8f7f3292b9..ba9dc73756 100644
--- a/bittensor/core/extrinsics/registration.py
+++ b/bittensor/core/extrinsics/registration.py
@@ -94,7 +94,9 @@ def make_substrate_call_with_retry():
# process if registration successful, try again if pow is still valid
response.process_events()
if not response.is_success:
- return False, format_error_message(response.error_message)
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
# Successful registration
else:
return True, None
@@ -335,7 +337,9 @@ def make_substrate_call_with_retry():
# process if registration successful, try again if pow is still valid
response.process_events()
if not response.is_success:
- return False, format_error_message(response.error_message)
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
# Successful registration
else:
return True, None
diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py
index 129e852777..445d2c0b06 100644
--- a/bittensor/core/extrinsics/root.py
+++ b/bittensor/core/extrinsics/root.py
@@ -49,7 +49,9 @@ def make_substrate_call_with_retry():
# process if registration successful, try again if pow is still valid
response.process_events()
if not response.is_success:
- return False, format_error_message(response.error_message)
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
# Successful registration
else:
return True, None
diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py
index ac712cd8cb..6eb7a67b25 100644
--- a/bittensor/core/extrinsics/serving.py
+++ b/bittensor/core/extrinsics/serving.py
@@ -186,7 +186,9 @@ def serve_extrinsic(
)
return True
else:
- logging.error(f"Failed: {format_error_message(error_message)}")
+ logging.error(
+ f"Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
+ )
return False
else:
return True
@@ -298,7 +300,9 @@ def publish_metadata(
if response.is_success:
return True
else:
- raise MetadataError(format_error_message(response.error_message))
+ raise MetadataError(
+ format_error_message(response.error_message, substrate=self.substrate)
+ )
# Community uses this function directly
diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py
index 98f4c16917..904b699926 100644
--- a/bittensor/core/extrinsics/set_weights.py
+++ b/bittensor/core/extrinsics/set_weights.py
@@ -179,7 +179,9 @@ def set_weights_extrinsic(
logging.success(f"Finalized! Set weights: {str(success)}")
return True, "Successfully set weights and Finalized."
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(error_message)
return False, error_message
diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py
index aaa2795583..b68a579967 100644
--- a/bittensor/core/extrinsics/transfer.py
+++ b/bittensor/core/extrinsics/transfer.py
@@ -198,7 +198,7 @@ def transfer_extrinsic(
)
else:
logging.error(
- f":cross_mark: Failed: {format_error_message(error_message)}"
+ f":cross_mark: Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
)
if success:
diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py
index 8413b5329f..8eee9676ad 100644
--- a/bittensor/core/settings.py
+++ b/bittensor/core/settings.py
@@ -30,16 +30,16 @@
WALLETS_DIR = USER_BITTENSOR_DIR / "wallets"
MINERS_DIR = USER_BITTENSOR_DIR / "miners"
-# Bittensor networks name
-NETWORKS = ["local", "finney", "test", "archive"]
-
-DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
-DEFAULT_NETWORK = NETWORKS[1]
# Create dirs if they don't exist
WALLETS_DIR.mkdir(parents=True, exist_ok=True)
MINERS_DIR.mkdir(parents=True, exist_ok=True)
+# Bittensor networks name
+NETWORKS = ["finney", "test", "archive", "local"]
+
+DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
+DEFAULT_NETWORK = NETWORKS[0]
# Bittensor endpoints (Needs to use wss://)
FINNEY_ENTRYPOINT = "wss://entrypoint-finney.opentensor.ai:443"
@@ -47,6 +47,13 @@
ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/"
LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9944"
+NETWORK_MAP = {
+ NETWORKS[0]: FINNEY_ENTRYPOINT,
+ NETWORKS[1]: FINNEY_TEST_ENTRYPOINT,
+ NETWORKS[2]: ARCHIVE_ENTRYPOINT,
+ NETWORKS[3]: LOCAL_ENTRYPOINT,
+}
+
# Currency Symbols Bittensor
TAO_SYMBOL: str = chr(0x03C4)
RAO_SYMBOL: str = chr(0x03C1)
diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py
index 3e3c61b017..fcbb4147d7 100644
--- a/bittensor/core/subtensor.py
+++ b/bittensor/core/subtensor.py
@@ -16,7 +16,7 @@
# DEALINGS IN THE SOFTWARE.
"""
-The ``bittensor.core.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor
+The ``bittensor.core.subtensor.Subtensor`` class in Bittensor serves as a crucial interface for interacting with the Bittensor
blockchain, facilitating a range of operations essential for the decentralized machine learning network.
"""
diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py
index 6239d89808..745726c264 100644
--- a/bittensor/utils/__init__.py
+++ b/bittensor/utils/__init__.py
@@ -15,8 +15,10 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+from urllib.parse import urlparse
+import ast
import hashlib
-from typing import Literal, Union, Optional, TYPE_CHECKING
+from typing import Any, Literal, Union, Optional, TYPE_CHECKING
import scalecodec
from bittensor_wallet import Keypair
@@ -28,6 +30,7 @@
from .version import version_checking, check_version, VersionCheckError
if TYPE_CHECKING:
+ from bittensor.utils.async_substrate_interface import AsyncSubstrateInterface
from substrateinterface import SubstrateInterface
RAOPERTAO = 1e9
@@ -142,14 +145,16 @@ def get_hash(content, encoding="utf-8"):
def format_error_message(
- error_message: dict, substrate: "SubstrateInterface" = None
+ error_message: Union[dict, Exception],
+ substrate: Union["AsyncSubstrateInterface", "SubstrateInterface"],
) -> str:
"""
Formats an error message from the Subtensor error information for use in extrinsics.
Args:
- error_message (dict): A dictionary containing the error information from Subtensor.
- substrate (SubstrateInterface, optional): The substrate interface to use.
+ error_message: A dictionary containing the error information from Subtensor, or a SubstrateRequestException
+ containing dictionary literal args.
+ substrate: The initialised SubstrateInterface object to use.
Returns:
str: A formatted error message string.
@@ -158,6 +163,27 @@ def format_error_message(
err_type = "UnknownType"
err_description = "Unknown Description"
+ if isinstance(error_message, Exception):
+ # generally gotten through SubstrateRequestException args
+ new_error_message = None
+ for arg in error_message.args:
+ try:
+ d = ast.literal_eval(arg)
+ if isinstance(d, dict):
+ if "error" in d:
+ new_error_message = d["error"]
+ break
+ elif all(x in d for x in ["code", "message", "data"]):
+ new_error_message = d
+ break
+ except ValueError:
+ pass
+ if new_error_message is None:
+ return_val = " ".join(error_message.args)
+ return f"Subtensor returned: {return_val}"
+ else:
+ error_message = new_error_message
+
if isinstance(error_message, dict):
# subtensor error structure
if (
@@ -166,14 +192,11 @@ def format_error_message(
and error_message.get("data")
):
err_name = "SubstrateRequestException"
- err_type = error_message.get("message")
- err_data = error_message.get("data")
+ err_type = error_message.get("message", "")
+ err_data = error_message.get("data", "")
# subtensor custom error marker
if err_data.startswith("Custom error:") and substrate:
- if not substrate.metadata:
- substrate.get_metadata()
-
if substrate.metadata:
try:
pallet = substrate.metadata.get_metadata_pallet(
@@ -185,8 +208,10 @@ def format_error_message(
err_type = error_dict.get("message", err_type)
err_docs = error_dict.get("docs", [])
err_description = err_docs[0] if err_docs else err_description
- except Exception:
- logging.error("Substrate pallets data unavailable.")
+ except (AttributeError, IndexError):
+ logging.error(
+ "Substrate pallets data unavailable. This is usually caused by an uninitialized substrate."
+ )
else:
err_description = err_data
@@ -277,3 +302,71 @@ def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool
else:
# Invalid address type
return False
+
+
+def decode_hex_identity_dict(info_dictionary) -> dict[str, Any]:
+    """
+    Decodes hex-encoded strings in a dictionary.
+
+    This function traverses the given dictionary, identifies hex-encoded strings, and decodes them into readable strings. It handles nested dictionaries and lists within the dictionary.
+
+    Args:
+        info_dictionary (dict): The dictionary containing hex-encoded strings to decode.
+
+    Returns:
+        dict: The dictionary with decoded strings.
+
+    Examples:
+        input_dict = {
+        ... "name": {"value": "0x6a6f686e"},
+        ... "additional": [
+        ... [{"data": "0x64617461"}]
+        ... ]
+        ... }
+        decode_hex_identity_dict(input_dict)
+        {'name': 'john', 'additional': [('data', 'data')]}
+    """
+
+    def get_decoded(data: str) -> str:
+        """Decodes a hex-encoded string."""
+        # NOTE(review): on UnicodeDecodeError this falls through and implicitly
+        # returns None, and the printed message uses the enclosing loop's
+        # `key`/`item`, which may not describe `data` when called from the
+        # "additional" branch below — confirm intended.
+        try:
+            return bytes.fromhex(data[2:]).decode()
+        except UnicodeDecodeError:
+            print(f"Could not decode: {key}: {item}")
+
+    # Mutates `info_dictionary` in place and also returns it.
+    for key, value in info_dictionary.items():
+        if isinstance(value, dict):
+            # Single-value wrapper like {"value": "0x..."}: unwrap and decode.
+            item = list(value.values())[0]
+            if isinstance(item, str) and item.startswith("0x"):
+                # NOTE(review): this except is unreachable — get_decoded already
+                # swallows UnicodeDecodeError internally.
+                try:
+                    info_dictionary[key] = get_decoded(item)
+                except UnicodeDecodeError:
+                    print(f"Could not decode: {key}: {item}")
+            else:
+                info_dictionary[key] = item
+        if key == "additional":
+            # "additional" is a list of lists of single-entry dicts; each inner
+            # list becomes a tuple of decoded values.
+            additional = []
+            for item in value:
+                additional.append(
+                    tuple(
+                        get_decoded(data=next(iter(sub_item.values())))
+                        for sub_item in item
+                    )
+                )
+            info_dictionary[key] = additional
+
+    return info_dictionary
+
+
+def validate_chain_endpoint(endpoint_url: str) -> tuple[bool, str]:
+    """Validates if the provided endpoint URL is a valid WebSocket URL.
+
+    Returns:
+        tuple[bool, str]: ``(True, "")`` when the URL uses a ``ws``/``wss``
+        scheme and has a network location; otherwise ``(False, message)``
+        with a user-facing explanation.
+    """
+    parsed = urlparse(endpoint_url)
+    # Only WebSocket schemes are acceptable chain endpoints.
+    if parsed.scheme not in ("ws", "wss"):
+        return False, (
+            f"Invalid URL or network name provided: [bright_cyan]({endpoint_url})[/bright_cyan].\n"
+            "Allowed network names are [bright_cyan]finney, test, local[/bright_cyan]. "
+            "Valid chain endpoints should use the scheme [bright_cyan]`ws` or `wss`[/bright_cyan].\n"
+        )
+    # A scheme without a host (e.g. "ws://") is still invalid.
+    if not parsed.netloc:
+        return False, "Invalid URL passed as the endpoint"
+    return True, ""
diff --git a/bittensor/utils/async_substrate_interface.py b/bittensor/utils/async_substrate_interface.py
new file mode 100644
index 0000000000..de0547e7b5
--- /dev/null
+++ b/bittensor/utils/async_substrate_interface.py
@@ -0,0 +1,2742 @@
+import asyncio
+import json
+import random
+from collections import defaultdict
+from dataclasses import dataclass
+from hashlib import blake2b
+from typing import Optional, Any, Union, Callable, Awaitable, cast
+
+import websockets
+from async_property import async_property
+from bittensor_wallet import Keypair
+from bt_decode import PortableRegistry, decode as decode_by_type_string, MetadataV15
+from scalecodec import GenericExtrinsic
+from scalecodec.base import ScaleBytes, ScaleType, RuntimeConfigurationObject
+from scalecodec.type_registry import load_type_registry_preset
+from scalecodec.types import GenericCall
+from substrateinterface.exceptions import (
+ SubstrateRequestException,
+ ExtrinsicNotFound,
+ BlockNotFound,
+)
+from substrateinterface.storage import StorageKey
+
+ResultHandler = Callable[[dict, Any], Awaitable[tuple[dict, bool]]]
+
+
+class TimeoutException(Exception):
+    """Raised when a substrate operation exceeds its allotted time."""
+
+    pass
+
+
+# Signal-handler shaped callback (signum, frame): converts an alarm signal into
+# a TimeoutException so callers can catch timeouts as a normal exception.
+def timeout_handler(signum, frame):
+    raise TimeoutException("Operation timed out")
+
+
+class ExtrinsicReceipt:
+    """
+    Object containing information of submitted extrinsic. Block hash where extrinsic is included is required
+    when retrieving triggered events or determine if extrinsic was successful
+    """
+
+    def __init__(
+        self,
+        substrate: "AsyncSubstrateInterface",
+        extrinsic_hash: Optional[str] = None,
+        block_hash: Optional[str] = None,
+        block_number: Optional[int] = None,
+        extrinsic_idx: Optional[int] = None,
+        finalized=None,
+    ):
+        """
+        Object containing information of submitted extrinsic. Block hash where extrinsic is included is required
+        when retrieving triggered events or determine if extrinsic was successful
+
+        Parameters
+        ----------
+        substrate
+        extrinsic_hash
+        block_hash
+        finalized
+        """
+        self.substrate = substrate
+        self.extrinsic_hash = extrinsic_hash
+        self.block_hash = block_hash
+        self.block_number = block_number
+        self.finalized = finalized
+
+        self.__extrinsic_idx = extrinsic_idx
+        self.__extrinsic = None
+
+        # Lazily populated by retrieve_extrinsic()/process_events().
+        self.__triggered_events: Optional[list] = None
+        self.__is_success: Optional[bool] = None
+        self.__error_message = None
+        self.__weight = None
+        self.__total_fee_amount = None
+
+    async def get_extrinsic_identifier(self) -> str:
+        """
+        Returns the on-chain identifier for this extrinsic in format "[block_number]-[extrinsic_idx]" e.g. 134324-2
+        Returns
+        -------
+        str
+        """
+        if self.block_number is None:
+            if self.block_hash is None:
+                raise ValueError(
+                    "Cannot create extrinsic identifier: block_hash is not set"
+                )
+
+            self.block_number = await self.substrate.get_block_number(self.block_hash)
+
+            if self.block_number is None:
+                raise ValueError(
+                    "Cannot create extrinsic identifier: unknown block_hash"
+                )
+
+        return f"{self.block_number}-{await self.extrinsic_idx}"
+
+    async def retrieve_extrinsic(self):
+        """Locates this extrinsic inside its block and caches it plus its index."""
+        if not self.block_hash:
+            raise ValueError(
+                "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+                "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+            )
+        # Determine extrinsic idx
+
+        block = await self.substrate.get_block(block_hash=self.block_hash)
+
+        extrinsics = block["extrinsics"]
+
+        if len(extrinsics) > 0:
+            if self.__extrinsic_idx is None:
+                self.__extrinsic_idx = self.__get_extrinsic_index(
+                    block_extrinsics=extrinsics, extrinsic_hash=self.extrinsic_hash
+                )
+
+            if self.__extrinsic_idx >= len(extrinsics):
+                raise ExtrinsicNotFound()
+
+            self.__extrinsic = extrinsics[self.__extrinsic_idx]
+
+    @async_property
+    async def extrinsic_idx(self) -> int:
+        """
+        Retrieves the index of this extrinsic in containing block
+
+        Returns
+        -------
+        int
+        """
+        if self.__extrinsic_idx is None:
+            await self.retrieve_extrinsic()
+        return self.__extrinsic_idx
+
+    @async_property
+    async def triggered_events(self) -> list:
+        """
+        Gets triggered events for submitted extrinsic. block_hash where extrinsic is included is required, manually
+        set block_hash or use `wait_for_inclusion` when submitting extrinsic
+
+        Returns
+        -------
+        list
+        """
+        if self.__triggered_events is None:
+            if not self.block_hash:
+                raise ValueError(
+                    "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+                    "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+                )
+
+            if await self.extrinsic_idx is None:
+                await self.retrieve_extrinsic()
+
+            self.__triggered_events = []
+
+            # Keep only the block events attributed to this extrinsic's index.
+            for event in await self.substrate.get_events(block_hash=self.block_hash):
+                if event["extrinsic_idx"] == await self.extrinsic_idx:
+                    self.__triggered_events.append(event)
+
+        return cast(list, self.__triggered_events)
+
+    async def process_events(self):
+        """Derives success/error/weight/fee info from this extrinsic's events."""
+        if await self.triggered_events:
+            self.__total_fee_amount = 0
+
+            # Process fees
+            has_transaction_fee_paid_event = False
+
+            for event in await self.triggered_events:
+                if (
+                    event["event"]["module_id"] == "TransactionPayment"
+                    and event["event"]["event_id"] == "TransactionFeePaid"
+                ):
+                    self.__total_fee_amount = event["event"]["attributes"]["actual_fee"]
+                    has_transaction_fee_paid_event = True
+
+            # Process other events
+            for event in await self.triggered_events:
+                # Check events
+                if (
+                    event["event"]["module_id"] == "System"
+                    and event["event"]["event_id"] == "ExtrinsicSuccess"
+                ):
+                    self.__is_success = True
+                    self.__error_message = None
+
+                    if "dispatch_info" in event["event"]["attributes"]:
+                        self.__weight = event["event"]["attributes"]["dispatch_info"][
+                            "weight"
+                        ]
+                    else:
+                        # Backwards compatibility
+                        self.__weight = event["event"]["attributes"]["weight"]
+
+                elif (
+                    event["event"]["module_id"] == "System"
+                    and event["event"]["event_id"] == "ExtrinsicFailed"
+                ):
+                    self.__is_success = False
+
+                    dispatch_info = event["event"]["attributes"]["dispatch_info"]
+                    dispatch_error = event["event"]["attributes"]["dispatch_error"]
+
+                    self.__weight = dispatch_info["weight"]
+
+                    if "Module" in dispatch_error:
+                        module_index = dispatch_error["Module"][0]["index"]
+                        error_index = int.from_bytes(
+                            bytes(dispatch_error["Module"][0]["error"]),
+                            byteorder="little",
+                            signed=False,
+                        )
+
+                        # NOTE(review): dead branch — int.from_bytes always
+                        # returns an int; kept from the sync implementation
+                        # where the raw error could still be a hex string.
+                        if isinstance(error_index, str):
+                            # Actual error index is first u8 in new [u8; 4] format
+                            error_index = int(error_index[2:4], 16)
+                        module_error = self.substrate.metadata.get_module_error(
+                            module_index=module_index, error_index=error_index
+                        )
+                        self.__error_message = {
+                            "type": "Module",
+                            "name": module_error.name,
+                            "docs": module_error.docs,
+                        }
+                    elif "BadOrigin" in dispatch_error:
+                        self.__error_message = {
+                            "type": "System",
+                            "name": "BadOrigin",
+                            "docs": "Bad origin",
+                        }
+                    elif "CannotLookup" in dispatch_error:
+                        self.__error_message = {
+                            "type": "System",
+                            "name": "CannotLookup",
+                            "docs": "Cannot lookup",
+                        }
+                    elif "Other" in dispatch_error:
+                        self.__error_message = {
+                            "type": "System",
+                            "name": "Other",
+                            "docs": "Unspecified error occurred",
+                        }
+
+                elif not has_transaction_fee_paid_event:
+                    # Legacy fee accounting: sum Treasury/Balances deposits when
+                    # no TransactionFeePaid event exists.
+                    if (
+                        event["event"]["module_id"] == "Treasury"
+                        and event["event"]["event_id"] == "Deposit"
+                    ):
+                        self.__total_fee_amount += event["event"]["attributes"]["value"]
+                    elif (
+                        event["event"]["module_id"] == "Balances"
+                        and event["event"]["event_id"] == "Deposit"
+                    ):
+                        # Fix: events here are plain dicts (see every other access
+                        # in this method); `event.value[...]` was a leftover from
+                        # the sync ScaleType API and would raise AttributeError.
+                        self.__total_fee_amount += event["event"]["attributes"]["amount"]
+
+    @async_property
+    async def is_success(self) -> bool:
+        """
+        Returns `True` if `ExtrinsicSuccess` event is triggered, `False` in case of `ExtrinsicFailed`
+        In case of False `error_message` will contain more details about the error
+
+
+        Returns
+        -------
+        bool
+        """
+        if self.__is_success is None:
+            await self.process_events()
+
+        return cast(bool, self.__is_success)
+
+    @async_property
+    async def error_message(self) -> Optional[dict]:
+        """
+        Returns the error message if the extrinsic failed in format e.g.:
+
+        `{'type': 'System', 'name': 'BadOrigin', 'docs': 'Bad origin'}`
+
+        Returns
+        -------
+        dict
+        """
+        if self.__error_message is None:
+            if await self.is_success:
+                return None
+            await self.process_events()
+        return self.__error_message
+
+    @async_property
+    async def weight(self) -> Union[int, dict]:
+        """
+        Contains the actual weight when executing this extrinsic
+
+        Returns
+        -------
+        int (WeightV1) or dict (WeightV2)
+        """
+        if self.__weight is None:
+            await self.process_events()
+        return self.__weight
+
+    @async_property
+    async def total_fee_amount(self) -> int:
+        """
+        Contains the total fee costs deducted when executing this extrinsic. This includes fee for the validator (
+        (`Balances.Deposit` event) and the fee deposited for the treasury (`Treasury.Deposit` event)
+
+        Returns
+        -------
+        int
+        """
+        if self.__total_fee_amount is None:
+            await self.process_events()
+        return cast(int, self.__total_fee_amount)
+
+    # Helper functions
+    @staticmethod
+    def __get_extrinsic_index(block_extrinsics: list, extrinsic_hash: str) -> int:
+        """
+        Returns the index of a provided extrinsic
+        """
+        for idx, extrinsic in enumerate(block_extrinsics):
+            if (
+                extrinsic.extrinsic_hash
+                and f"0x{extrinsic.extrinsic_hash.hex()}" == extrinsic_hash
+            ):
+                return idx
+        raise ExtrinsicNotFound()
+
+    # Backwards compatibility methods
+    def __getitem__(self, item):
+        return getattr(self, item)
+
+    def __iter__(self):
+        for item in self.__dict__.items():
+            yield item
+
+    def get(self, name):
+        return self[name]
+
+
+class QueryMapResult:
+    """
+    Async-iterable result of a `query_map` call. Iteration yields the records
+    from the initial page and transparently fetches subsequent pages from the
+    node (via `substrate.query_map`, offset by `last_key`) once the buffer is
+    exhausted.
+    """
+
+    def __init__(
+        self,
+        records: list,
+        page_size: int,
+        substrate: "AsyncSubstrateInterface",
+        module: Optional[str] = None,
+        storage_function: Optional[str] = None,
+        params: Optional[list] = None,
+        block_hash: Optional[str] = None,
+        last_key: Optional[str] = None,
+        max_results: Optional[int] = None,
+        ignore_decoding_errors: bool = False,
+    ):
+        self.records = records
+        self.page_size = page_size
+        self.module = module
+        self.storage_function = storage_function
+        self.block_hash = block_hash
+        self.substrate = substrate
+        self.last_key = last_key
+        self.max_results = max_results
+        self.params = params
+        self.ignore_decoding_errors = ignore_decoding_errors
+        # Set to True once a page fetch returns no records.
+        self.loading_complete = False
+        self._buffer = iter(self.records)  # Initialize the buffer with initial records
+
+    async def retrieve_next_page(self, start_key) -> list:
+        """Fetches the next page of records starting after `start_key`."""
+        result = await self.substrate.query_map(
+            module=self.module,
+            storage_function=self.storage_function,
+            params=self.params,
+            page_size=self.page_size,
+            block_hash=self.block_hash,
+            start_key=start_key,
+            max_results=self.max_results,
+            ignore_decoding_errors=self.ignore_decoding_errors,
+        )
+
+        # Update last key from new result set to use as offset for next page
+        self.last_key = result.last_key
+        return result.records
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        try:
+            # Try to get the next record from the buffer
+            return next(self._buffer)
+        except StopIteration:
+            # If no more records in the buffer, try to fetch the next page
+            if self.loading_complete:
+                raise StopAsyncIteration
+
+            next_page = await self.retrieve_next_page(self.last_key)
+            if not next_page:
+                self.loading_complete = True
+                raise StopAsyncIteration
+
+            # Update the buffer with the newly fetched records
+            self._buffer = iter(next_page)
+            return next(self._buffer)
+
+    def __getitem__(self, item):
+        # Index access only covers records fetched so far (the initial page).
+        return self.records[item]
+
+
+@dataclass
+class Preprocessed:
+    """
+    A storage query prepared for dispatch: the RPC `method` and `params` to
+    send, plus scale-type information (`value_scale_type`, `storage_item`)
+    presumably used to decode the response — confirm against the caller that
+    builds these elsewhere in this module.
+    """
+
+    queryable: str
+    method: str
+    params: list
+    value_scale_type: str
+    storage_item: ScaleType
+
+
+class RuntimeCache:
+    """In-memory cache of `Runtime` objects, keyed by block number and/or block hash."""
+
+    blocks: dict[int, "Runtime"]
+    block_hashes: dict[str, "Runtime"]
+
+    def __init__(self):
+        self.blocks = {}
+        self.block_hashes = {}
+
+    def add_item(
+        self, block: Optional[int], block_hash: Optional[str], runtime: "Runtime"
+    ):
+        """Stores `runtime` under whichever of `block`/`block_hash` is provided."""
+        if block is not None:
+            self.blocks[block] = runtime
+        if block_hash is not None:
+            self.block_hashes[block_hash] = runtime
+
+    def retrieve(
+        self, block: Optional[int] = None, block_hash: Optional[str] = None
+    ) -> Optional["Runtime"]:
+        """Looks up a cached runtime; block number takes precedence over block hash."""
+        if block is not None:
+            return self.blocks.get(block)
+        elif block_hash is not None:
+            return self.block_hashes.get(block_hash)
+        else:
+            return None
+
+
class Runtime:
    """
    Holds the runtime state (metadata, type registry and runtime configuration) used for
    SCALE encoding/decoding at a particular point in the chain's history.
    """

    block_hash: str
    block_id: int
    runtime_version = None
    transaction_version = None
    cache_region = None
    metadata = None
    type_registry_preset = None

    def __init__(self, chain, runtime_config, metadata, type_registry):
        """
        :param chain: name of the chain this runtime belongs to
        :param runtime_config: runtime configuration object used for SCALE encoding/decoding
        :param metadata: decoded chain metadata (or None if not yet loaded)
        :param type_registry: custom type registry applied on top of the presets
        """
        self.config = {}
        self.chain = chain
        self.type_registry = type_registry
        # Fix: assign the caller-supplied config directly. Previously a throwaway
        # RuntimeConfigurationObject() was constructed here and immediately overwritten.
        self.runtime_config = runtime_config
        self.metadata = metadata

    @property
    def implements_scaleinfo(self) -> bool:
        """
        Returns True if current runtime implements a `PortableRegistry` (`MetadataV14` and higher)
        """
        if self.metadata:
            return self.metadata.portable_registry is not None
        else:
            return False

    def reload_type_registry(
        self, use_remote_preset: bool = True, auto_discover: bool = True
    ):
        """
        Reload type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically apply
        changes in type definitions when a runtime upgrade occurred.

        :param use_remote_preset: when True the preset is downloaded from GitHub master, otherwise files from
            the locally installed scalecodec package are used
        :param auto_discover: whether to attempt auto-discovery of a preset matching the chain name
        """
        self.runtime_config.clear_type_registry()

        self.runtime_config.implements_scale_info = self.implements_scaleinfo

        # Load metadata types in runtime configuration
        self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
        self.apply_type_registry_presets(
            use_remote_preset=use_remote_preset, auto_discover=auto_discover
        )

    def apply_type_registry_presets(
        self,
        use_remote_preset: bool = True,
        auto_discover: bool = True,
    ):
        """
        Applies type registry presets to the runtime
        :param use_remote_preset: bool, whether to use presets from remote
        :param auto_discover: bool, whether to use presets from local installed scalecodec package
        """
        if self.type_registry_preset is not None:
            # Load type registry according to preset
            type_registry_preset_dict = load_type_registry_preset(
                name=self.type_registry_preset, use_remote_preset=use_remote_preset
            )

            if not type_registry_preset_dict:
                raise ValueError(
                    f"Type registry preset '{self.type_registry_preset}' not found"
                )

        elif auto_discover:
            # Try to auto discover type registry preset by chain name
            type_registry_name = self.chain.lower().replace(" ", "-")
            try:
                type_registry_preset_dict = load_type_registry_preset(
                    type_registry_name
                )
                self.type_registry_preset = type_registry_name
            except ValueError:
                type_registry_preset_dict = None

        else:
            type_registry_preset_dict = None

        if type_registry_preset_dict:
            # Load type registries in runtime configuration
            if self.implements_scaleinfo is False:
                # Only runtime with no embedded types in metadata need the default set of explicit defined types
                self.runtime_config.update_type_registry(
                    load_type_registry_preset(
                        "legacy", use_remote_preset=use_remote_preset
                    )
                )

            if self.type_registry_preset != "legacy":
                self.runtime_config.update_type_registry(type_registry_preset_dict)

        if self.type_registry:
            # Load type registries in runtime configuration
            self.runtime_config.update_type_registry(self.type_registry)
+
+
class RequestManager:
    """
    Bookkeeping for a batch of in-flight RPC payloads: maps outgoing request ids to
    payload ids and accumulates their (possibly multipart) responses.
    """

    RequestResults = dict[Union[str, int], list[Union[ScaleType, dict]]]

    def __init__(self, payloads):
        # payload-id lookup keyed by the id the request was sent with
        self.response_map = {}
        # accumulated results per payload id, each flagged complete or not
        self.responses = defaultdict(lambda: {"complete": False, "results": []})
        self.payloads_count = len(payloads)

    def add_request(self, item_id: int, request_id: Any):
        """
        Adds an outgoing request to the responses map for later retrieval
        """
        self.response_map[item_id] = request_id

    def overwrite_request(self, item_id: int, request_id: Any):
        """
        Overwrites an existing request in the responses map with a new request_id. This is used
        for multipart responses that generate a subscription id we need to watch, rather than the initial
        request_id.
        """
        payload_id = self.response_map.pop(item_id)
        self.response_map[request_id] = payload_id
        return request_id

    def add_response(self, item_id: int, response: dict, complete: bool):
        """
        Maps a response to the request for later retrieval
        """
        entry = self.responses[self.response_map[item_id]]
        entry["results"].append(response)
        entry["complete"] = complete

    @property
    def is_complete(self) -> bool:
        """
        Returns whether all requests in the manager have completed
        """
        if len(self.responses) != self.payloads_count:
            return False
        return all(entry["complete"] for entry in self.responses.values())

    def get_results(self) -> RequestResults:
        """
        Generates a dictionary mapping the requests initiated to the responses received.
        """
        collected = {}
        for payload_id, entry in self.responses.items():
            collected[payload_id] = entry["results"]
        return collected
+
+
class Websocket:
    """
    Reference-counted wrapper around a single websocket connection shared by multiple
    concurrent callers, with a delayed-shutdown timer that keeps the connection warm
    between bursts of use.
    """

    def __init__(
        self,
        ws_url: str,
        max_subscriptions=1024,
        max_connections=100,
        shutdown_timer=5,
        options: Optional[dict] = None,
    ):
        """
        Websocket manager object. Allows for the use of a single websocket connection by multiple
        calls.

        :param ws_url: Websocket URL to connect to
        :param max_subscriptions: Maximum number of subscriptions per websocket connection
        :param max_connections: Maximum number of connections total
        :param shutdown_timer: Number of seconds to shut down websocket connection after last use
        :param options: optional keyword arguments passed through to ``websockets.connect``
        """
        # TODO allow setting max concurrent connections and rpc subscriptions per connection
        # TODO reconnection logic
        self.ws_url = ws_url
        self.ws: Optional[websockets.WebSocketClientProtocol] = None
        # Monotonically increasing id stamped onto outgoing payloads (reset when idle/shutdown).
        self.id = 0
        self.max_subscriptions = max_subscriptions
        self.max_connections = max_connections
        self.shutdown_timer = shutdown_timer
        # Incoming messages keyed by request id, or subscription id for subscription updates.
        self._received = {}
        # Reference count of active `async with` users of this connection.
        self._in_use = 0
        self._receiving_task = None
        self._attempts = 0
        self._initialized = False
        # Guards all mutable connection state above.
        self._lock = asyncio.Lock()
        # Pending delayed-shutdown task; cancelled if the connection is re-entered in time.
        self._exit_task = None
        self._open_subscriptions = 0
        self._options = options if options else {}

    async def __aenter__(self):
        async with self._lock:
            self._in_use += 1
            if self._exit_task:
                # Connection is being reused before the shutdown timer fired; keep it alive.
                self._exit_task.cancel()
            if not self._initialized:
                self._initialized = True
                await self._connect()
                self._receiving_task = asyncio.create_task(self._start_receiving())
        return self

    async def _connect(self):
        # 10-second cap on the initial websocket handshake.
        self.ws = await asyncio.wait_for(
            websockets.connect(self.ws_url, **self._options), timeout=10
        )

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        async with self._lock:
            self._in_use -= 1
            if self._exit_task is not None:
                # Cancel any previously scheduled shutdown before deciding whether to reschedule.
                self._exit_task.cancel()
                try:
                    await self._exit_task
                except asyncio.CancelledError:
                    pass
            if self._in_use == 0 and self.ws is not None:
                self.id = 0
                self._open_subscriptions = 0
                # Last user left: schedule a delayed shutdown so an imminent caller can reuse us.
                self._exit_task = asyncio.create_task(self._exit_with_timer())

    async def _exit_with_timer(self):
        """
        Allows for graceful shutdown of websocket connection after specified number of seconds, allowing
        for reuse of the websocket connection.
        """
        try:
            await asyncio.sleep(self.shutdown_timer)
            await self.shutdown()
        except asyncio.CancelledError:
            pass

    async def shutdown(self):
        """Cancel the receiver task, close the socket and reset state for a future reconnect."""
        async with self._lock:
            try:
                self._receiving_task.cancel()
                await self._receiving_task
                await self.ws.close()
            except (AttributeError, asyncio.CancelledError):
                # AttributeError: never connected (task/ws still None); CancelledError is expected
                # from awaiting the freshly cancelled receiver task.
                pass
            self.ws = None
            self._initialized = False
            self._receiving_task = None
            self.id = 0

    async def _recv(self) -> None:
        # Receives one message and files it under its request id or subscription id
        # for `retrieve()` to pick up.
        try:
            response = json.loads(
                await cast(websockets.WebSocketClientProtocol, self.ws).recv()
            )
            async with self._lock:
                # NOTE(review): this decrements for every received message, not only when a
                # subscription actually closes — confirm the intended bookkeeping.
                self._open_subscriptions -= 1
            if "id" in response:
                self._received[response["id"]] = response
            elif "params" in response:
                self._received[response["params"]["subscription"]] = response
            else:
                raise KeyError(response)
        except websockets.ConnectionClosed:
            raise
        except KeyError as e:
            raise e

    async def _start_receiving(self):
        # Background loop draining the socket until cancelled or the connection closes.
        try:
            while True:
                await self._recv()
        except asyncio.CancelledError:
            pass
        except websockets.ConnectionClosed:
            # TODO try reconnect, but only if it's needed
            raise

    async def send(self, payload: dict) -> int:
        """
        Sends a payload to the websocket connection.

        :param payload: payload, generate a payload with the AsyncSubstrateInterface.make_payload method

        :return: the id the payload was sent with, for retrieving the matching response
        """
        async with self._lock:
            original_id = self.id
            self.id += 1
            self._open_subscriptions += 1
        try:
            await self.ws.send(json.dumps({**payload, **{"id": original_id}}))
            return original_id
        except websockets.ConnectionClosed:
            raise

    async def retrieve(self, item_id: int) -> Optional[dict]:
        """
        Retrieves a single item from received responses dict queue

        :param item_id: id of the item to retrieve

        :return: retrieved item
        """
        # Polls the received-message map; the item is removed once claimed.
        while True:
            async with self._lock:
                if item_id in self._received:
                    return self._received.pop(item_id)
            await asyncio.sleep(0.1)
+
+
+class AsyncSubstrateInterface:
+ runtime = None
+ registry: Optional[PortableRegistry] = None
+
    def __init__(
        self,
        chain_endpoint: str,
        use_remote_preset=False,
        auto_discover=True,
        auto_reconnect=True,
        ss58_format=None,
        type_registry=None,
        chain_name=None,
    ):
        """
        The asyncio-compatible version of the subtensor interface commands we use in bittensor

        :param chain_endpoint: websocket URL of the chain node to connect to
        :param use_remote_preset: whether type-registry presets are fetched remotely rather than
            from the locally installed scalecodec package
        :param auto_discover: whether to auto-discover a type-registry preset from the chain name
        :param auto_reconnect: stored in config — presumably enables automatic reconnection; TODO confirm usage
        :param ss58_format: SS58 address format; overwritten from the chain's System.SS58Prefix
            constant during runtime initialization
        :param type_registry: custom type registry applied on top of the presets
        :param chain_name: chain name; fetched via the ``system_chain`` RPC in ``initialize()`` if omitted
        """
        self.chain_endpoint = chain_endpoint
        self.__chain = chain_name
        # Shared websocket with generous size/read/write limits (large responses, e.g. metadata).
        self.ws = Websocket(
            chain_endpoint,
            options={
                "max_size": 2**32,
                "read_limit": 2**16,
                "write_limit": 2**16,
            },
        )
        self._lock = asyncio.Lock()
        self.last_block_hash: Optional[str] = None
        self.config = {
            "use_remote_preset": use_remote_preset,
            "auto_discover": auto_discover,
            "auto_reconnect": auto_reconnect,
            "rpc_methods": None,
            "strict_scale_decode": True,
        }
        self.initialized = False
        # Holds fire-and-forget tasks (e.g. unsubscribe requests) so they are not garbage-collected.
        self._forgettable_task = None
        self.ss58_format = ss58_format
        self.type_registry = type_registry
        self.runtime_cache = RuntimeCache()
        self.block_id: Optional[int] = None
        self.runtime_version = None
        self.runtime_config = RuntimeConfigurationObject()
        # Decoded metadata keyed by runtime spec version.
        self.__metadata_cache = {}
        self.type_registry_preset = None
        self.transaction_version = None
        self.metadata = None
        self.metadata_version_hex = "0x0f000000"  # v15
+
+ async def __aenter__(self):
+ await self.initialize()
+
    async def initialize(self):
        """
        Initialize the connection to the chain: resolves the chain name (if not supplied at
        construction), loads the type registry, and sets up the decoding registry and runtime.
        Idempotent — subsequent calls are no-ops once initialized.
        """
        async with self._lock:
            if not self.initialized:
                if not self.__chain:
                    # Ask the node for its chain name via the system_chain RPC.
                    chain = await self.rpc_request("system_chain", [])
                    self.__chain = chain.get("result")
                self.reload_type_registry()
                # Load the v15 metadata registry and initialize the runtime concurrently.
                await asyncio.gather(self.load_registry(), self.init_runtime(None))
                self.initialized = True
+
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Intentionally a no-op: the underlying Websocket manages its own lifetime and
        # shuts itself down on a timer after its last user exits.
        pass
+
    @property
    def chain(self):
        """
        Returns the name of the substrate chain currently associated with this object
        (supplied at construction or resolved during ``initialize()``).
        """
        return self.__chain
+
+ async def get_storage_item(self, module: str, storage_function: str):
+ if not self.metadata:
+ await self.init_runtime()
+ metadata_pallet = self.metadata.get_metadata_pallet(module)
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+ return storage_item
+
+ async def _get_current_block_hash(
+ self, block_hash: Optional[str], reuse: bool
+ ) -> Optional[str]:
+ if block_hash:
+ self.last_block_hash = block_hash
+ return block_hash
+ elif reuse:
+ if self.last_block_hash:
+ return self.last_block_hash
+ return block_hash
+
+ async def load_registry(self):
+ metadata_rpc_result = await self.rpc_request(
+ "state_call",
+ ["Metadata_metadata_at_version", self.metadata_version_hex],
+ )
+ metadata_option_hex_str = metadata_rpc_result["result"]
+ metadata_option_bytes = bytes.fromhex(metadata_option_hex_str[2:])
+ metadata_v15 = MetadataV15.decode_from_metadata_option(metadata_option_bytes)
+ self.registry = PortableRegistry.from_metadata_v15(metadata_v15)
+
    async def decode_scale(
        self, type_string, scale_bytes: bytes, return_scale_obj=False
    ):
        """
        Helper function to decode arbitrary SCALE-bytes (e.g. 0x02000000) according to given RUST type_string
        (e.g. BlockNumber), using the registry built by ``load_registry``.

        :param type_string: the RUST type string to decode against
        :param scale_bytes: the SCALE-encoded bytes to decode; a single zero byte is treated as empty/None
        :param return_scale_obj: NOTE(review): currently unused — the decoded value is returned
            regardless of this flag; confirm intended behavior

        :return: the decoded value, or None when the input is a single zero byte
        """
        if scale_bytes == b"\x00":
            obj = None
        else:
            obj = decode_by_type_string(type_string, self.registry, scale_bytes)
        return obj
+
    async def init_runtime(
        self, block_hash: Optional[str] = None, block_id: Optional[int] = None
    ) -> Runtime:
        """
        This method is used by all other methods that deals with metadata and types defined in the type registry.
        It optionally retrieves the block_hash when block_id is given and sets the applicable metadata for that
        block_hash. Also, it applies all the versioned types at the time of the block_hash.

        Because parsing of metadata and type registry is quite heavy, the result will be cached per runtime id.
        In the future there could be support for caching backends like Redis to make this cache more persistent.

        :param block_hash: optional block hash, should not be specified if block_id is
        :param block_id: optional block id, should not be specified if block_hash is

        :returns: Runtime object
        """

        async def get_runtime(block_hash, block_id) -> Runtime:
            # Check if runtime state already set to current block
            if (block_hash and block_hash == self.last_block_hash) or (
                block_id and block_id == self.block_id
            ):
                return Runtime(
                    self.chain,
                    self.runtime_config,
                    self.metadata,
                    self.type_registry,
                )

            if block_id is not None:
                block_hash = await self.get_block_hash(block_id)

            if not block_hash:
                # Neither hash nor id supplied: use the current chain head.
                block_hash = await self.get_chain_head()

            self.last_block_hash = block_hash
            self.block_id = block_id

            # In fact calls and storage functions are decoded against runtime of previous block, therefor retrieve
            # metadata and apply type registry of runtime of parent block
            block_header = await self.rpc_request(
                "chain_getHeader", [self.last_block_hash]
            )

            if block_header["result"] is None:
                raise SubstrateRequestException(
                    f'Block not found for "{self.last_block_hash}"'
                )

            parent_block_hash: str = block_header["result"]["parentHash"]

            # The genesis block is its own runtime reference (its parent hash is all zeroes).
            if (
                parent_block_hash
                == "0x0000000000000000000000000000000000000000000000000000000000000000"
            ):
                runtime_block_hash = self.last_block_hash
            else:
                runtime_block_hash = parent_block_hash

            runtime_info = await self.get_block_runtime_version(
                block_hash=runtime_block_hash
            )

            if runtime_info is None:
                raise SubstrateRequestException(
                    f"No runtime information for block '{block_hash}'"
                )

            # Check if runtime state already set to current block
            if runtime_info.get("specVersion") == self.runtime_version:
                return Runtime(
                    self.chain,
                    self.runtime_config,
                    self.metadata,
                    self.type_registry,
                )

            self.runtime_version = runtime_info.get("specVersion")
            self.transaction_version = runtime_info.get("transactionVersion")

            if not self.metadata:
                if self.runtime_version in self.__metadata_cache:
                    # Get metadata from cache
                    self.metadata = self.__metadata_cache[self.runtime_version]
                else:
                    self.metadata = await self.get_block_metadata(
                        block_hash=runtime_block_hash, decode=True
                    )

                    # Update metadata cache
                    self.__metadata_cache[self.runtime_version] = self.metadata

            # Update type registry
            self.reload_type_registry(use_remote_preset=False, auto_discover=True)

            if self.implements_scaleinfo:
                # MetadataV14+ embeds its own type information; add it to the registry.
                self.runtime_config.add_portable_registry(self.metadata)

            # Set active runtime version
            self.runtime_config.set_active_spec_version_id(self.runtime_version)

            # Check and apply runtime constants
            ss58_prefix_constant = await self.get_constant(
                "System", "SS58Prefix", block_hash=block_hash
            )

            if ss58_prefix_constant:
                self.ss58_format = ss58_prefix_constant

            # Set runtime compatibility flags
            try:
                _ = self.runtime_config.create_scale_object(
                    "sp_weights::weight_v2::Weight"
                )
                self.config["is_weight_v2"] = True
                self.runtime_config.update_type_registry_types(
                    {"Weight": "sp_weights::weight_v2::Weight"}
                )
            except NotImplementedError:
                # Older runtimes without WeightV2 fall back to the V1 Weight type.
                self.config["is_weight_v2"] = False
                self.runtime_config.update_type_registry_types({"Weight": "WeightV1"})
            return Runtime(
                self.chain,
                self.runtime_config,
                self.metadata,
                self.type_registry,
            )

        if block_id and block_hash:
            raise ValueError("Cannot provide block_hash and block_id at the same time")

        # Serve from the runtime cache when possible; otherwise build and cache a new Runtime.
        if not (runtime := self.runtime_cache.retrieve(block_id, block_hash)):
            runtime = await get_runtime(block_hash, block_id)
            self.runtime_cache.add_item(block_id, block_hash, runtime)
        return runtime
+
    def reload_type_registry(
        self, use_remote_preset: bool = True, auto_discover: bool = True
    ):
        """
        Reload type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically apply
        changes in type definitions when a runtime upgrade occurred

        :param use_remote_preset: when True the preset is downloaded from GitHub master, otherwise files from the
            locally installed scalecodec package are used
        :param auto_discover: whether to attempt auto-discovery of a preset matching the chain name
        """
        self.runtime_config.clear_type_registry()

        self.runtime_config.implements_scale_info = self.implements_scaleinfo

        # Load metadata types in runtime configuration
        self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
        self.apply_type_registry_presets(
            use_remote_preset=use_remote_preset, auto_discover=auto_discover
        )
+
    def apply_type_registry_presets(
        self, use_remote_preset: bool = True, auto_discover: bool = True
    ):
        """
        Applies type registry presets to the runtime configuration.

        :param use_remote_preset: whether to fetch presets from the remote (GitHub master) rather than
            the locally installed scalecodec package
        :param auto_discover: whether to attempt auto-discovery of a preset named after the chain
        """
        if self.type_registry_preset is not None:
            # Load type registry according to preset
            type_registry_preset_dict = load_type_registry_preset(
                name=self.type_registry_preset, use_remote_preset=use_remote_preset
            )

            if not type_registry_preset_dict:
                raise ValueError(
                    f"Type registry preset '{self.type_registry_preset}' not found"
                )

        elif auto_discover:
            # Try to auto discover type registry preset by chain name
            type_registry_name = self.chain.lower().replace(" ", "-")
            try:
                type_registry_preset_dict = load_type_registry_preset(
                    type_registry_name
                )
                self.type_registry_preset = type_registry_name
            except ValueError:
                type_registry_preset_dict = None

        else:
            type_registry_preset_dict = None

        if type_registry_preset_dict:
            # Load type registries in runtime configuration
            if self.implements_scaleinfo is False:
                # Only runtime with no embedded types in metadata need the default set of explicit defined types
                self.runtime_config.update_type_registry(
                    load_type_registry_preset(
                        "legacy", use_remote_preset=use_remote_preset
                    )
                )

            if self.type_registry_preset != "legacy":
                self.runtime_config.update_type_registry(type_registry_preset_dict)

        if self.type_registry:
            # Load type registries in runtime configuration
            self.runtime_config.update_type_registry(self.type_registry)
+
+ @property
+ def implements_scaleinfo(self) -> Optional[bool]:
+ """
+ Returns True if current runtime implementation a `PortableRegistry` (`MetadataV14` and higher)
+
+ Returns
+ -------
+ bool
+ """
+ if self.metadata:
+ return self.metadata.portable_registry is not None
+ else:
+ return None
+
    async def create_storage_key(
        self,
        pallet: str,
        storage_function: str,
        params: Optional[list] = None,
        block_hash: str = None,
    ) -> StorageKey:
        """
        Create a `StorageKey` instance providing storage function details. See `subscribe_storage()`.

        :param pallet: name of pallet
        :param storage_function: name of storage function
        :param params: Optional list of parameters in case of a Mapped storage function
        :param block_hash: optional block hash used to initialize the runtime with the correct metadata

        :return: StorageKey
        """
        # The runtime (metadata + config) must be loaded before storage keys can be built.
        await self.init_runtime(block_hash=block_hash)

        return StorageKey.create_from_storage_function(
            pallet,
            storage_function,
            params,
            runtime_config=self.runtime_config,
            metadata=self.metadata,
        )
+
    async def _get_block_handler(
        self,
        block_hash: str,
        ignore_decoding_errors: bool = False,
        include_author: bool = False,
        header_only: bool = False,
        finalized_only: bool = False,
        subscription_handler: Optional[Callable] = None,
    ):
        """
        Fetches (or subscribes to) a block and decodes its extrinsics and digest log items.

        :param block_hash: hash of the block to fetch; also used to initialize the runtime
        :param ignore_decoding_errors: when True, items that fail to decode are set to None instead of raising
        :param include_author: when True, resolves the block author from the validator set (BABE/aura pre-digests)
        :param header_only: when True, fetch only the header rather than the full block
        :param finalized_only: in subscription mode, subscribe to finalized heads rather than new heads
        :param subscription_handler: when callable, subscribes to heads and invokes this handler per update
            until it returns a non-None result

        :return: decoded block dict, the subscription result, or None when the block is not found
        """
        try:
            await self.init_runtime(block_hash=block_hash)
        except BlockNotFound:
            return None

        async def decode_block(block_data, block_data_hash=None):
            # Decodes a raw block dict in place: extrinsics, digest logs and (optionally) the author.
            if block_data:
                if block_data_hash:
                    block_data["header"]["hash"] = block_data_hash

                if type(block_data["header"]["number"]) is str:
                    # Convert block number from hex (backwards compatibility)
                    block_data["header"]["number"] = int(
                        block_data["header"]["number"], 16
                    )

                extrinsic_cls = self.runtime_config.get_decoder_class("Extrinsic")

                if "extrinsics" in block_data:
                    for idx, extrinsic_data in enumerate(block_data["extrinsics"]):
                        extrinsic_decoder = extrinsic_cls(
                            data=ScaleBytes(extrinsic_data),
                            metadata=self.metadata,
                            runtime_config=self.runtime_config,
                        )
                        try:
                            extrinsic_decoder.decode(check_remaining=True)
                            block_data["extrinsics"][idx] = extrinsic_decoder

                        except Exception as e:
                            if not ignore_decoding_errors:
                                raise
                            # Undecodable extrinsics are nulled out rather than dropped.
                            block_data["extrinsics"][idx] = None

                for idx, log_data in enumerate(block_data["header"]["digest"]["logs"]):
                    if type(log_data) is str:
                        # Convert digest log from hex (backwards compatibility)
                        try:
                            log_digest_cls = self.runtime_config.get_decoder_class(
                                "sp_runtime::generic::digest::DigestItem"
                            )

                            if log_digest_cls is None:
                                raise NotImplementedError(
                                    "No decoding class found for 'DigestItem'"
                                )

                            log_digest = log_digest_cls(data=ScaleBytes(log_data))
                            log_digest.decode(
                                check_remaining=self.config.get("strict_scale_decode")
                            )

                            block_data["header"]["digest"]["logs"][idx] = log_digest

                            if include_author and "PreRuntime" in log_digest.value:
                                if self.implements_scaleinfo:
                                    engine = bytes(log_digest[1][0])
                                    # Retrieve validator set
                                    parent_hash = block_data["header"]["parentHash"]
                                    validator_set = await self.query(
                                        "Session", "Validators", block_hash=parent_hash
                                    )

                                    if engine == b"BABE":
                                        babe_predigest = (
                                            self.runtime_config.create_scale_object(
                                                type_string="RawBabePreDigest",
                                                data=ScaleBytes(
                                                    bytes(log_digest[1][1])
                                                ),
                                            )
                                        )

                                        babe_predigest.decode(
                                            check_remaining=self.config.get(
                                                "strict_scale_decode"
                                            )
                                        )

                                        rank_validator = babe_predigest[1].value[
                                            "authority_index"
                                        ]

                                        block_author = validator_set[rank_validator]
                                        block_data["author"] = block_author.value

                                    elif engine == b"aura":
                                        aura_predigest = (
                                            self.runtime_config.create_scale_object(
                                                type_string="RawAuraPreDigest",
                                                data=ScaleBytes(
                                                    bytes(log_digest[1][1])
                                                ),
                                            )
                                        )

                                        aura_predigest.decode(check_remaining=True)

                                        rank_validator = aura_predigest.value[
                                            "slot_number"
                                        ] % len(validator_set)

                                        block_author = validator_set[rank_validator]
                                        block_data["author"] = block_author.value
                                    else:
                                        raise NotImplementedError(
                                            f"Cannot extract author for engine {log_digest.value['PreRuntime'][0]}"
                                        )
                                else:
                                    # Legacy (pre-MetadataV14) digest layout.
                                    if (
                                        log_digest.value["PreRuntime"]["engine"]
                                        == "BABE"
                                    ):
                                        validator_set = await self.query(
                                            "Session",
                                            "Validators",
                                            block_hash=block_hash,
                                        )
                                        rank_validator = log_digest.value["PreRuntime"][
                                            "data"
                                        ]["authority_index"]

                                        block_author = validator_set.elements[
                                            rank_validator
                                        ]
                                        block_data["author"] = block_author.value
                                    else:
                                        raise NotImplementedError(
                                            f"Cannot extract author for engine {log_digest.value['PreRuntime']['engine']}"
                                        )

                        except Exception:
                            if not ignore_decoding_errors:
                                raise
                            block_data["header"]["digest"]["logs"][idx] = None

            return block_data

        if callable(subscription_handler):
            # Subscription mode: stream new (or finalized) heads into the caller's handler.
            rpc_method_prefix = "Finalized" if finalized_only else "New"

            async def result_handler(message, update_nr, subscription_id):
                new_block = await decode_block({"header": message["params"]["result"]})

                subscription_result = subscription_handler(
                    new_block, update_nr, subscription_id
                )

                if subscription_result is not None:
                    # Handler returned end result: unsubscribe from further updates
                    self._forgettable_task = asyncio.create_task(
                        self.rpc_request(
                            f"chain_unsubscribe{rpc_method_prefix}Heads",
                            [subscription_id],
                        )
                    )

                return subscription_result

            result = await self._make_rpc_request(
                [
                    self.make_payload(
                        "_get_block_handler",
                        f"chain_subscribe{rpc_method_prefix}Heads",
                        [],
                    )
                ],
                result_handler=result_handler,
            )

            return result

        else:
            if header_only:
                response = await self.rpc_request("chain_getHeader", [block_hash])
                return await decode_block(
                    {"header": response["result"]}, block_data_hash=block_hash
                )

            else:
                response = await self.rpc_request("chain_getBlock", [block_hash])
                return await decode_block(
                    response["result"]["block"], block_data_hash=block_hash
                )
+
+ async def get_block(
+ self,
+ block_hash: Optional[str] = None,
+ block_number: Optional[int] = None,
+ ignore_decoding_errors: bool = False,
+ include_author: bool = False,
+ finalized_only: bool = False,
+ ) -> Optional[dict]:
+ """
+ Retrieves a block and decodes its containing extrinsics and log digest items. If `block_hash` and `block_number`
+ is omitted the chain tip will be retrieve, or the finalized head if `finalized_only` is set to true.
+
+ Either `block_hash` or `block_number` should be set, or both omitted.
+
+ Parameters
+ ----------
+ block_hash: the hash of the block to be retrieved
+ block_number: the block number to retrieved
+ ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue decoding
+ include_author: This will retrieve the block author from the validator set and add to the result
+ finalized_only: when no `block_hash` or `block_number` is set, this will retrieve the finalized head
+
+ Returns
+ -------
+ A dict containing the extrinsic and digest logs data
+ """
+ if block_hash and block_number:
+ raise ValueError("Either block_hash or block_number should be be set")
+
+ if block_number is not None:
+ block_hash = await self.get_block_hash(block_number)
+
+ if block_hash is None:
+ return
+
+ if block_hash and finalized_only:
+ raise ValueError(
+ "finalized_only cannot be True when block_hash is provided"
+ )
+
+ if block_hash is None:
+ # Retrieve block hash
+ if finalized_only:
+ block_hash = await self.get_chain_finalised_head()
+ else:
+ block_hash = await self.get_chain_head()
+
+ return await self._get_block_handler(
+ block_hash=block_hash,
+ ignore_decoding_errors=ignore_decoding_errors,
+ header_only=False,
+ include_author=include_author,
+ )
+
    async def get_events(self, block_hash: Optional[str] = None) -> list:
        """
        Convenience method to get events for a certain block (storage call for module 'System' and function 'Events')

        :param block_hash: hash of the block to query; defaults to the current chain head

        :return: list of normalized event dicts
        """

        def convert_event_data(data):
            # Normalizes one raw decoded event record into the historical event dict shape.
            # Extract phase information
            phase_key, phase_value = next(iter(data["phase"].items()))
            try:
                extrinsic_idx = phase_value[0]
            except IndexError:
                # Phases that carry no extrinsic index leave it unset.
                extrinsic_idx = None

            # Extract event details
            module_id, event_data = next(iter(data["event"].items()))
            event_id, attributes_data = next(iter(event_data[0].items()))

            # Convert class and pays_fee dictionaries to their string equivalents if they exist
            attributes = attributes_data
            if isinstance(attributes, dict):
                for key, value in attributes.items():
                    if isinstance(value, dict):
                        # Convert nested single-key dictionaries to their keys as strings
                        sub_key = next(iter(value.keys()))
                        if value[sub_key] == ():
                            attributes[key] = sub_key

            # Create the converted dictionary
            converted = {
                "phase": phase_key,
                "extrinsic_idx": extrinsic_idx,
                "event": {
                    "module_id": module_id,
                    "event_id": event_id,
                    "attributes": attributes,
                },
                "topics": list(data["topics"]),  # Convert topics tuple to a list
            }

            return converted

        events = []

        if not block_hash:
            block_hash = await self.get_chain_head()

        storage_obj = await self.query(
            module="System", storage_function="Events", block_hash=block_hash
        )
        if storage_obj:
            for item in list(storage_obj):
                events.append(convert_event_data(item))
        return events
+
+ async def get_block_runtime_version(self, block_hash: str) -> dict:
+ """
+ Retrieve the runtime version id of given block_hash
+ """
+ response = await self.rpc_request("state_getRuntimeVersion", [block_hash])
+ return response.get("result")
+
+ async def get_block_metadata(
+ self, block_hash: Optional[str] = None, decode: bool = True
+ ) -> Union[dict, ScaleType]:
+ """
+ A pass-though to existing JSONRPC method `state_getMetadata`.
+
+ Parameters
+ ----------
+ block_hash
+ decode: True for decoded version
+
+ Returns
+ -------
+
+ """
+ params = None
+ if decode and not self.runtime_config:
+ raise ValueError(
+ "Cannot decode runtime configuration without a supplied runtime_config"
+ )
+
+ if block_hash:
+ params = [block_hash]
+ response = await self.rpc_request("state_getMetadata", params)
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ if response.get("result") and decode:
+ metadata_decoder = self.runtime_config.create_scale_object(
+ "MetadataVersioned", data=ScaleBytes(response.get("result"))
+ )
+ metadata_decoder.decode()
+
+ return metadata_decoder
+
+ return response
+
    async def _preprocess(
        self,
        query_for: Optional[list],
        block_hash: Optional[str],
        storage_function: str,
        module: str,
    ) -> Preprocessed:
        """
        Creates a Preprocessed data object for passing to `_make_rpc_request`

        :param query_for: list of parameters for the storage function (None for plain storage values)
        :param block_hash: block hash to query at
        :param storage_function: name of the storage function within the pallet
        :param module: name of the pallet (module)

        :return: Preprocessed object bundling the RPC method, params and decoding info

        :raises SubstrateRequestException: if the pallet or storage function is not found in metadata
        :raises ValueError: if the number of parameters does not match the storage function's arity
        """
        params = query_for if query_for else []
        # Search storage call in metadata
        metadata_pallet = self.metadata.get_metadata_pallet(module)

        if not metadata_pallet:
            raise SubstrateRequestException(f'Pallet "{module}" not found')

        storage_item = metadata_pallet.get_storage_function(storage_function)

        if not metadata_pallet or not storage_item:
            raise SubstrateRequestException(
                f'Storage function "{module}.{storage_function}" not found'
            )

        # SCALE type string of value
        param_types = storage_item.get_params_type_string()
        value_scale_type = storage_item.get_value_type_string()

        if len(params) != len(param_types):
            raise ValueError(
                f"Storage function requires {len(param_types)} parameters, {len(params)} given"
            )

        storage_key = StorageKey.create_from_storage_function(
            module,
            storage_item.value["name"],
            params,
            runtime_config=self.runtime_config,
            metadata=self.metadata,
        )
        method = "state_getStorageAt"
        return Preprocessed(
            str(query_for),
            method,
            [storage_key.to_hex(), block_hash],
            value_scale_type,
            storage_item,
        )
+
    async def _process_response(
        self,
        response: dict,
        subscription_id: Union[int, str],
        value_scale_type: Optional[str] = None,
        storage_item: Optional[ScaleType] = None,
        runtime: Optional[Runtime] = None,
        result_handler: Optional[ResultHandler] = None,
    ) -> tuple[Union[ScaleType, dict], bool]:
        """
        Processes the RPC call response by decoding it, returning it as is, or setting a handler for subscriptions,
        depending on the specific call.

        :param response: the RPC call response
        :param subscription_id: the subscription id for subscriptions, used only for subscriptions with a result handler
        :param value_scale_type: Scale Type string used for decoding ScaleBytes results
        :param storage_item: The ScaleType object used for decoding ScaleBytes results
        :param runtime: the runtime object, used for decoding ScaleBytes results
        :param result_handler: the result handler coroutine used for handling longer-running subscriptions

        :return: (decoded response, completion)
        """
        result: Union[dict, ScaleType] = response
        if value_scale_type and isinstance(storage_item, ScaleType):
            if not runtime:
                # Build a runtime snapshot from current state under the lock.
                async with self._lock:
                    runtime = Runtime(
                        self.chain,
                        self.runtime_config,
                        self.metadata,
                        self.type_registry,
                    )
            if response.get("result") is not None:
                query_value = response.get("result")
            elif storage_item.value["modifier"] == "Default":
                # Fallback to default value of storage function if no result
                query_value = storage_item.value_object["default"].value_object
            else:
                # No result is interpreted as an Option<...> result
                value_scale_type = f"Option<{value_scale_type}>"
                query_value = storage_item.value_object["default"].value_object
            # Normalize the raw value (hex string / bytearray / other) into bytes for the decoder.
            if isinstance(query_value, str):
                q = bytes.fromhex(query_value[2:])
            elif isinstance(query_value, bytearray):
                q = bytes(query_value)
            else:
                q = query_value
            obj = await self.decode_scale(value_scale_type, q, True)
            result = obj
        if asyncio.iscoroutinefunction(result_handler):
            # For multipart responses as a result of subscriptions.
            message, bool_result = await result_handler(response, subscription_id)
            return message, bool_result
        return result, True
+
    async def _make_rpc_request(
        self,
        payloads: list[dict],
        value_scale_type: Optional[str] = None,
        storage_item: Optional[ScaleType] = None,
        runtime: Optional[Runtime] = None,
        result_handler: Optional[ResultHandler] = None,
    ) -> RequestManager.RequestResults:
        """
        Sends all ``payloads`` over the shared websocket and gathers their
        responses, decoding storage results when ``value_scale_type`` and
        ``storage_item`` are supplied.

        :param payloads: dicts produced by ``make_payload`` (keys ``id`` and ``payload``)
        :param value_scale_type: SCALE type string used to decode storage responses
        :param storage_item: storage function ScaleType used for decoding/defaults
        :param runtime: runtime used for decoding; built lazily when omitted
        :param result_handler: coroutine for multipart subscription responses

        :return: mapping of payload id to its list of (decoded) responses
        """
        request_manager = RequestManager(payloads)

        # Becomes True once the first subscription response has been remapped,
        # so the {item_id: payload_id} overwrite below happens only once.
        subscription_added = False

        async with self.ws as ws:
            for item in payloads:
                item_id = await ws.send(item["payload"])
                request_manager.add_request(item_id, item["id"])

            while True:
                for item_id in request_manager.response_map.keys():
                    # Subscriptions keep polling even after a first response;
                    # plain requests are polled only until answered.
                    if (
                        item_id not in request_manager.responses
                        or asyncio.iscoroutinefunction(result_handler)
                    ):
                        if response := await ws.retrieve(item_id):
                            if (
                                asyncio.iscoroutinefunction(result_handler)
                                and not subscription_added
                            ):
                                # handles subscriptions, overwrites the previous mapping of {item_id : payload_id}
                                # with {subscription_id : payload_id}
                                try:
                                    item_id = request_manager.overwrite_request(
                                        item_id, response["result"]
                                    )
                                except KeyError:
                                    # No "result" key: the node rejected the
                                    # subscription request outright.
                                    raise SubstrateRequestException(str(response))
                            decoded_response, complete = await self._process_response(
                                response,
                                item_id,
                                value_scale_type,
                                storage_item,
                                runtime,
                                result_handler,
                            )
                            request_manager.add_response(
                                item_id, decoded_response, complete
                            )
                    if (
                        asyncio.iscoroutinefunction(result_handler)
                        and not subscription_added
                    ):
                        # Restart the for-loop: response_map keys changed
                        # during the overwrite above.
                        subscription_added = True
                        break

                if request_manager.is_complete:
                    break

        return request_manager.get_results()
+
+ @staticmethod
+ def make_payload(id_: str, method: str, params: list) -> dict:
+ """
+ Creates a payload for making an rpc_request with _make_rpc_request
+
+ :param id_: a unique name you would like to give to this request
+ :param method: the method in the RPC request
+ :param params: the params in the RPC request
+
+ :return: the payload dict
+ """
+ return {
+ "id": id_,
+ "payload": {"jsonrpc": "2.0", "method": method, "params": params},
+ }
+
+ async def rpc_request(
+ self,
+ method: str,
+ params: Optional[list],
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> Any:
+ """
+ Makes an RPC request to the subtensor. Use this only if ``self.query`` and ``self.query_multiple`` and
+ ``self.query_map`` do not meet your needs.
+
+ :param method: str the method in the RPC request
+ :param params: list of the params in the RPC request
+ :param block_hash: optional str, the hash of the block — only supply this if not supplying the block
+ hash in the params, and not reusing the block hash
+ :param reuse_block_hash: optional bool, whether to reuse the block hash in the params — only mark as True
+ if not supplying the block hash in the params, or via the `block_hash` parameter
+
+ :return: the response from the RPC request
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ params = params or []
+ payload_id = f"{method}{random.randint(0, 7000)}"
+ payloads = [
+ self.make_payload(
+ payload_id,
+ method,
+ params + [block_hash] if block_hash else params,
+ )
+ ]
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+ result = await self._make_rpc_request(payloads, runtime=runtime)
+ if "error" in result[payload_id][0]:
+ raise SubstrateRequestException(result[payload_id][0]["error"]["message"])
+ if "result" in result[payload_id][0]:
+ return result[payload_id][0]
+ else:
+ raise SubstrateRequestException(result[payload_id][0])
+
+ async def get_block_hash(self, block_id: int) -> str:
+ return (await self.rpc_request("chain_getBlockHash", [block_id]))["result"]
+
+ async def get_chain_head(self) -> str:
+ result = await self._make_rpc_request(
+ [
+ self.make_payload(
+ "rpc_request",
+ "chain_getHead",
+ [],
+ )
+ ],
+ runtime=Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ ),
+ )
+ self.last_block_hash = result["rpc_request"][0]["result"]
+ return result["rpc_request"][0]["result"]
+
+ async def compose_call(
+ self,
+ call_module: str,
+ call_function: str,
+ call_params: Optional[dict] = None,
+ block_hash: Optional[str] = None,
+ ) -> GenericCall:
+ """
+ Composes a call payload which can be used in an extrinsic.
+
+ :param call_module: Name of the runtime module e.g. Balances
+ :param call_function: Name of the call function e.g. transfer
+ :param call_params: This is a dict containing the params of the call. e.g.
+ `{'dest': 'EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk', 'value': 1000000000000}`
+ :param block_hash: Use metadata at given block_hash to compose call
+
+ :return: A composed call
+ """
+ if call_params is None:
+ call_params = {}
+
+ await self.init_runtime(block_hash=block_hash)
+
+ call = self.runtime_config.create_scale_object(
+ type_string="Call", metadata=self.metadata
+ )
+
+ call.encode(
+ {
+ "call_module": call_module,
+ "call_function": call_function,
+ "call_args": call_params,
+ }
+ )
+
+ return call
+
    async def query_multiple(
        self,
        params: list,
        storage_function: str,
        module: str,
        block_hash: Optional[str] = None,
        reuse_block_hash: bool = False,
    ) -> dict[str, ScaleType]:
        """
        Queries the subtensor. Only use this when making multiple queries, else use ``self.query``

        :param params: list of parameter values; one storage query is made per entry
        :param module: the metadata module (pallet) name
        :param storage_function: the storage function name within the module
        :param block_hash: optional hash of the block to query state at
        :param reuse_block_hash: reuse the last-used block hash (do not combine with ``block_hash``)

        :return: mapping of each input param to its decoded query result
        """
        # By allowing for specifying the block hash, users, if they have multiple query types they want
        # to do, can simply query the block hash first, and then pass multiple query_subtensor calls
        # into an asyncio.gather, with the specified block hash
        block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
        if block_hash:
            self.last_block_hash = block_hash
        runtime = await self.init_runtime(block_hash=block_hash)
        # gather preserves input order, which the zip at the end relies on.
        preprocessed: tuple[Preprocessed] = await asyncio.gather(
            *[
                self._preprocess([x], block_hash, storage_function, module)
                for x in params
            ]
        )
        all_info = [
            self.make_payload(item.queryable, item.method, item.params)
            for item in preprocessed
        ]
        # These will always be the same throughout the preprocessed list, so we just grab the first one
        # NOTE(review): assumes params is non-empty — an empty list raises IndexError here.
        value_scale_type = preprocessed[0].value_scale_type
        storage_item = preprocessed[0].storage_item

        responses = await self._make_rpc_request(
            all_info, value_scale_type, storage_item, runtime
        )
        return {
            param: responses[p.queryable][0] for (param, p) in zip(params, preprocessed)
        }
+
    async def query_multi(
        self, storage_keys: list[StorageKey], block_hash: Optional[str] = None
    ) -> list:
        """
        Query multiple storage keys in one request.

        Example:

        ```
        storage_keys = [
            substrate.create_storage_key(
                "System", "Account", ["F4xQKRUagnSGjFqafyhajLs94e7Vvzvr8ebwYJceKpr8R7T"]
            ),
            substrate.create_storage_key(
                "System", "Account", ["GSEX8kR4Kz5UZGhvRUCJG93D5hhTAoVZ5tAe6Zne7V42DSi"]
            )
        ]

        result = substrate.query_multi(storage_keys)
        ```

        Parameters
        ----------
        storage_keys: list of StorageKey objects
        block_hash: Optional block_hash of state snapshot

        Returns
        -------
        list of `(storage_key, scale_obj)` tuples
        """

        await self.init_runtime(block_hash=block_hash)

        # Retrieve corresponding value
        response = await self.rpc_request(
            "state_queryStorageAt", [[s.to_hex() for s in storage_keys], block_hash]
        )

        if "error" in response:
            raise SubstrateRequestException(response["error"]["message"])

        result = []

        # Map each hex-encoded key back to its StorageKey so each change can be
        # decoded with the right value scale type.
        storage_key_map = {s.to_hex(): s for s in storage_keys}

        for result_group in response["result"]:
            for change_storage_key, change_data in result_group["changes"]:
                # Decode result for specified storage_key
                storage_key = storage_key_map[change_storage_key]
                if change_data is None:
                    # NOTE(review): a missing value is decoded from a single
                    # zero byte — presumably the SCALE "empty"/None encoding;
                    # confirm against the sync implementation.
                    change_data = b"\x00"
                else:
                    change_data = bytes.fromhex(change_data[2:])
                result.append(
                    (
                        storage_key,
                        await self.decode_scale(
                            storage_key.value_scale_type, change_data
                        ),
                    )
                )

        return result
+
+ async def create_scale_object(
+ self,
+ type_string: str,
+ data: Optional[ScaleBytes] = None,
+ block_hash: Optional[str] = None,
+ **kwargs,
+ ) -> "ScaleType":
+ """
+ Convenience method to create a SCALE object of type `type_string`, this will initialize the runtime
+ automatically at moment of `block_hash`, or chain tip if omitted.
+
+ :param type_string: str Name of SCALE type to create
+ :param data: ScaleBytes Optional ScaleBytes to decode
+ :param block_hash: Optional block hash for moment of decoding, when omitted the chain tip will be used
+ :param kwargs: keyword args for the Scale Type constructor
+
+ :return: The created Scale Type object
+ """
+ runtime = await self.init_runtime(block_hash=block_hash)
+ if "metadata" not in kwargs:
+ kwargs["metadata"] = runtime.metadata
+
+ return runtime.runtime_config.create_scale_object(
+ type_string, data=data, **kwargs
+ )
+
    async def generate_signature_payload(
        self,
        call: GenericCall,
        era=None,
        nonce: int = 0,
        tip: int = 0,
        tip_asset_id: Optional[int] = None,
        include_call_length: bool = False,
    ) -> ScaleBytes:
        """
        Builds the SCALE-encoded payload that is signed to produce an extrinsic.

        :param call: the composed call to sign
        :param era: era spec; ``None``/``"00"`` makes the extrinsic immortal,
            a dict with ``current`` or ``phase`` makes it mortal
        :param nonce: account nonce to include in the payload
        :param tip: tip for the block author
        :param tip_asset_id: optional asset id with which the tip is paid
        :param include_call_length: wrap the call data in a length-prefixed
            ``Bytes`` encoding before including it

        :return: the payload to sign; blake2b-hashed when longer than 256 bytes
        """
        # Retrieve genesis hash
        genesis_hash = await self.get_block_hash(0)

        if not era:
            era = "00"

        if era == "00":
            # Immortal extrinsic
            block_hash = genesis_hash
        else:
            # Determine mortality of extrinsic
            era_obj = self.runtime_config.create_scale_object("Era")

            if isinstance(era, dict) and "current" not in era and "phase" not in era:
                raise ValueError(
                    'The era dict must contain either "current" or "phase" element to encode a valid era'
                )

            # Mortal era: the checkpoint hash is the era's birth block.
            era_obj.encode(era)
            block_hash = await self.get_block_hash(
                block_id=era_obj.birth(era.get("current"))
            )

        # Create signature payload
        signature_payload = self.runtime_config.create_scale_object(
            "ExtrinsicPayloadValue"
        )

        # Process signed extensions in metadata
        if "signed_extensions" in self.metadata[1][1]["extrinsic"]:
            # Base signature payload
            signature_payload.type_mapping = [["call", "CallBytes"]]

            # Add signed extensions to payload
            # NOTE: the append order below defines the field order of the
            # payload; "extrinsic" entries come first, then the
            # "additional_signed" entries (values the chain checks but which
            # are not encoded into the extrinsic itself).
            signed_extensions = self.metadata.get_signed_extensions()

            if "CheckMortality" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["era", signed_extensions["CheckMortality"]["extrinsic"]]
                )

            if "CheckEra" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["era", signed_extensions["CheckEra"]["extrinsic"]]
                )

            if "CheckNonce" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["nonce", signed_extensions["CheckNonce"]["extrinsic"]]
                )

            if "ChargeTransactionPayment" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["tip", signed_extensions["ChargeTransactionPayment"]["extrinsic"]]
                )

            if "ChargeAssetTxPayment" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["asset_id", signed_extensions["ChargeAssetTxPayment"]["extrinsic"]]
                )

            if "CheckMetadataHash" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["mode", signed_extensions["CheckMetadataHash"]["extrinsic"]]
                )

            if "CheckSpecVersion" in signed_extensions:
                signature_payload.type_mapping.append(
                    [
                        "spec_version",
                        signed_extensions["CheckSpecVersion"]["additional_signed"],
                    ]
                )

            if "CheckTxVersion" in signed_extensions:
                signature_payload.type_mapping.append(
                    [
                        "transaction_version",
                        signed_extensions["CheckTxVersion"]["additional_signed"],
                    ]
                )

            if "CheckGenesis" in signed_extensions:
                signature_payload.type_mapping.append(
                    [
                        "genesis_hash",
                        signed_extensions["CheckGenesis"]["additional_signed"],
                    ]
                )

            if "CheckMortality" in signed_extensions:
                signature_payload.type_mapping.append(
                    [
                        "block_hash",
                        signed_extensions["CheckMortality"]["additional_signed"],
                    ]
                )

            if "CheckEra" in signed_extensions:
                signature_payload.type_mapping.append(
                    ["block_hash", signed_extensions["CheckEra"]["additional_signed"]]
                )

            if "CheckMetadataHash" in signed_extensions:
                signature_payload.type_mapping.append(
                    [
                        "metadata_hash",
                        signed_extensions["CheckMetadataHash"]["additional_signed"],
                    ]
                )

        if include_call_length:
            # Length-prefix the call data ("Bytes" encoding).
            length_obj = self.runtime_config.create_scale_object("Bytes")
            call_data = str(length_obj.encode(str(call.data)))

        else:
            call_data = str(call.data)

        payload_dict = {
            "call": call_data,
            "era": era,
            "nonce": nonce,
            "tip": tip,
            "spec_version": self.runtime_version,
            "genesis_hash": genesis_hash,
            "block_hash": block_hash,
            "transaction_version": self.transaction_version,
            "asset_id": {"tip": tip, "asset_id": tip_asset_id},
            "metadata_hash": None,
            "mode": "Disabled",
        }

        signature_payload.encode(payload_dict)

        # Oversized payloads are hashed before signing (payloads > 256 bytes
        # are signed via their blake2b digest).
        if signature_payload.data.length > 256:
            return ScaleBytes(
                data=blake2b(signature_payload.data.data, digest_size=32).digest()
            )

        return signature_payload.data
+
    async def create_signed_extrinsic(
        self,
        call: GenericCall,
        keypair: Keypair,
        era: Optional[dict] = None,
        nonce: Optional[int] = None,
        tip: int = 0,
        tip_asset_id: Optional[int] = None,
        signature: Optional[Union[bytes, str]] = None,
    ) -> "GenericExtrinsic":
        """
        Creates an extrinsic signed by given account details

        :param call: GenericCall to create extrinsic for
        :param keypair: Keypair used to sign the extrinsic
        :param era: Specify mortality in blocks in follow format:
            {'period': [amount_blocks]} If omitted the extrinsic is immortal
        :param nonce: nonce to include in extrinsics, if omitted the current nonce is retrieved on-chain
        :param tip: The tip for the block author to gain priority during network congestion
        :param tip_asset_id: Optional asset ID with which to pay the tip
        :param signature: Optionally provide signature if externally signed

        :return: The signed Extrinsic
        """
        await self.init_runtime()

        # Check requirements
        if not isinstance(call, GenericCall):
            raise TypeError("'call' must be of type Call")

        # Check if extrinsic version is supported
        if self.metadata[1][1]["extrinsic"]["version"] != 4:  # type: ignore
            raise NotImplementedError(
                f"Extrinsic version {self.metadata[1][1]['extrinsic']['version']} not supported"  # type: ignore
            )

        # Retrieve nonce
        if nonce is None:
            nonce = await self.get_account_nonce(keypair.ss58_address) or 0

        # Process era
        if era is None:
            # "00" encodes an immortal extrinsic.
            era = "00"
        else:
            if isinstance(era, dict) and "current" not in era and "phase" not in era:
                # Retrieve current block id
                era["current"] = await self.get_block_number(
                    await self.get_chain_finalised_head()
                )

        if signature is not None:
            # Externally supplied signature; accept hex string or raw bytes.
            if isinstance(signature, str) and signature[0:2] == "0x":
                signature = bytes.fromhex(signature[2:])

            # Check if signature is a MultiSignature and contains signature version
            if len(signature) == 65:
                signature_version = signature[0]
                signature = signature[1:]
            else:
                signature_version = keypair.crypto_type

        else:
            # Create signature payload
            signature_payload = await self.generate_signature_payload(
                call=call, era=era, nonce=nonce, tip=tip, tip_asset_id=tip_asset_id
            )

            # Set Signature version to crypto type of keypair
            signature_version = keypair.crypto_type

            # Sign payload
            signature = keypair.sign(signature_payload)

        # Create extrinsic
        extrinsic = self.runtime_config.create_scale_object(
            type_string="Extrinsic", metadata=self.metadata
        )

        value = {
            "account_id": f"0x{keypair.public_key.hex()}",
            "signature": f"0x{signature.hex()}",
            "call_function": call.value["call_function"],
            "call_module": call.value["call_module"],
            "call_args": call.value["call_args"],
            "nonce": nonce,
            "era": era,
            "tip": tip,
            "asset_id": {"tip": tip, "asset_id": tip_asset_id},
            "mode": "Disabled",
        }

        # Check if ExtrinsicSignature is MultiSignature, otherwise omit signature_version
        signature_cls = self.runtime_config.get_decoder_class("ExtrinsicSignature")
        if issubclass(signature_cls, self.runtime_config.get_decoder_class("Enum")):
            value["signature_version"] = signature_version

        extrinsic.encode(value)

        return extrinsic
+
+ async def get_chain_finalised_head(self):
+ """
+ A pass-though to existing JSONRPC method `chain_getFinalizedHead`
+
+ Returns
+ -------
+
+ """
+ response = await self.rpc_request("chain_getFinalizedHead", [])
+
+ if response is not None:
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ return response.get("result")
+
    async def runtime_call(
        self,
        api: str,
        method: str,
        params: Optional[Union[list, dict]] = None,
        block_hash: Optional[str] = None,
    ) -> ScaleType:
        """
        Calls a runtime API method

        :param api: Name of the runtime API e.g. 'TransactionPaymentApi'
        :param method: Name of the method e.g. 'query_fee_details'
        :param params: List of parameters needed to call the runtime API
        :param block_hash: Hash of the block at which to make the runtime API call

        :return: ScaleType from the runtime call
        """
        # NOTE(review): init_runtime() is called without block_hash; block_hash
        # only affects the state_call below. Confirm this is intentional for
        # historical-block runtime calls.
        await self.init_runtime()

        if params is None:
            params = {}

        try:
            # Runtime API definitions live in the type registry, keyed by API
            # name and then method name.
            runtime_call_def = self.runtime_config.type_registry["runtime_api"][api][
                "methods"
            ][method]
            runtime_api_types = self.runtime_config.type_registry["runtime_api"][
                api
            ].get("types", {})
        except KeyError:
            raise ValueError(f"Runtime API Call '{api}.{method}' not found in registry")

        if isinstance(params, list) and len(params) != len(runtime_call_def["params"]):
            raise ValueError(
                f"Number of parameter provided ({len(params)}) does not "
                f"match definition {len(runtime_call_def['params'])}"
            )

        # Add runtime API types to registry
        self.runtime_config.update_type_registry_types(runtime_api_types)
        runtime = Runtime(
            self.chain,
            self.runtime_config,
            self.metadata,
            self.type_registry,
        )

        # Encode params
        # Params may be positional (list) or named (dict); either way they are
        # SCALE-encoded in definition order.
        param_data = ScaleBytes(bytes())
        for idx, param in enumerate(runtime_call_def["params"]):
            scale_obj = runtime.runtime_config.create_scale_object(param["type"])
            if isinstance(params, list):
                param_data += scale_obj.encode(params[idx])
            else:
                if param["name"] not in params:
                    raise ValueError(f"Runtime Call param '{param['name']}' is missing")

                param_data += scale_obj.encode(params[param["name"]])

        # RPC request
        result_data = await self.rpc_request(
            "state_call", [f"{api}_{method}", str(param_data), block_hash]
        )

        # Decode result
        # TODO update this to use bt-decode
        result_obj = runtime.runtime_config.create_scale_object(
            runtime_call_def["type"]
        )
        result_obj.decode(
            ScaleBytes(result_data["result"]),
            check_remaining=self.config.get("strict_scale_decode"),
        )

        return result_obj
+
+ async def get_account_nonce(self, account_address: str) -> int:
+ """
+ Returns current nonce for given account address
+
+ :param account_address: SS58 formatted address
+
+ :return: Nonce for given account address
+ """
+ nonce_obj = await self.runtime_call(
+ "AccountNonceApi", "account_nonce", [account_address]
+ )
+ return nonce_obj.value
+
+ async def get_metadata_constant(self, module_name, constant_name, block_hash=None):
+ """
+ Retrieves the details of a constant for given module name, call function name and block_hash
+ (or chaintip if block_hash is omitted)
+
+ Parameters
+ ----------
+ module_name
+ constant_name
+ block_hash
+
+ Returns
+ -------
+ MetadataModuleConstants
+ """
+
+ # await self.init_runtime(block_hash=block_hash)
+
+ for module in self.metadata.pallets:
+ if module_name == module.name and module.constants:
+ for constant in module.constants:
+ if constant_name == constant.value["name"]:
+ return constant
+
+ async def get_constant(
+ self,
+ module_name: str,
+ constant_name: str,
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> "ScaleType":
+ """
+ Returns the decoded `ScaleType` object of the constant for given module name, call function name and block_hash
+ (or chaintip if block_hash is omitted)
+
+ Parameters
+ ----------
+ :param module_name: Name of the module to query
+ :param constant_name: Name of the constant to query
+ :param block_hash: Hash of the block at which to make the runtime API call
+ :param reuse_block_hash: Reuse last-used block hash if set to true
+
+ :return: ScaleType from the runtime call
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ constant = await self.get_metadata_constant(
+ module_name, constant_name, block_hash=block_hash
+ )
+ if constant:
+ # Decode to ScaleType
+ return await self.decode_scale(
+ constant.type,
+ bytes(constant.constant_value),
+ return_scale_obj=True,
+ )
+ else:
+ return None
+
+ async def get_payment_info(
+ self, call: GenericCall, keypair: Keypair
+ ) -> dict[str, Any]:
+ """
+ Retrieves fee estimation via RPC for given extrinsic
+
+ Parameters
+ ----------
+ call: Call object to estimate fees for
+ keypair: Keypair of the sender, does not have to include private key because no valid signature is required
+
+ Returns
+ -------
+ Dict with payment info
+
+ E.g. `{'class': 'normal', 'partialFee': 151000000, 'weight': {'ref_time': 143322000}}`
+
+ """
+
+ # Check requirements
+ if not isinstance(call, GenericCall):
+ raise TypeError("'call' must be of type Call")
+
+ if not isinstance(keypair, Keypair):
+ raise TypeError("'keypair' must be of type Keypair")
+
+ # No valid signature is required for fee estimation
+ signature = "0x" + "00" * 64
+
+ # Create extrinsic
+ extrinsic = await self.create_signed_extrinsic(
+ call=call, keypair=keypair, signature=signature
+ )
+ extrinsic_len = self.runtime_config.create_scale_object("u32")
+ extrinsic_len.encode(len(extrinsic.data))
+
+ result = await self.runtime_call(
+ "TransactionPaymentApi", "query_info", [extrinsic, extrinsic_len]
+ )
+
+ return result.value
+
+ async def query(
+ self,
+ module: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ raw_storage_key: Optional[bytes] = None,
+ subscription_handler=None,
+ reuse_block_hash: bool = False,
+ ) -> Union["ScaleType"]:
+ """
+ Queries subtensor. This should only be used when making a single request. For multiple requests,
+ you should use ``self.query_multiple``
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+ preprocessed: Preprocessed = await self._preprocess(
+ params, block_hash, storage_function, module
+ )
+ payload = [
+ self.make_payload(
+ preprocessed.queryable, preprocessed.method, preprocessed.params
+ )
+ ]
+ value_scale_type = preprocessed.value_scale_type
+ storage_item = preprocessed.storage_item
+
+ responses = await self._make_rpc_request(
+ payload,
+ value_scale_type,
+ storage_item,
+ runtime,
+ result_handler=subscription_handler,
+ )
+ return responses[preprocessed.queryable][0]
+
    async def query_map(
        self,
        module: str,
        storage_function: str,
        params: Optional[list] = None,
        block_hash: Optional[str] = None,
        max_results: Optional[int] = None,
        start_key: Optional[str] = None,
        page_size: int = 100,
        ignore_decoding_errors: bool = False,
        reuse_block_hash: bool = False,
    ) -> "QueryMapResult":
        """
        Iterates over all key-pairs located at the given module and storage_function. The storage
        item must be a map.

        Example:

        ```
        result = await substrate.query_map('System', 'Account', max_results=100)

        async for account, account_info in result:
            print(f"Free balance of account '{account.value}': {account_info.value['data']['free']}")
        ```

        Note: it is important that you do not use `for x in result.records`, as this will sidestep possible
        pagination. You must do `async for x in result`.

        :param module: The module name in the metadata, e.g. System or Balances.
        :param storage_function: The storage function name, e.g. Account or Locks.
        :param params: The input parameters in case of for example a `DoubleMap` storage function
        :param block_hash: Optional block hash for result at given block, when left to None the chain tip will be used.
        :param max_results: the maximum of results required, if set the query will stop fetching results when number is
            reached
        :param start_key: The storage key used as offset for the results, for pagination purposes
        :param page_size: The results are fetched from the node RPC in chunks of this size
        :param ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue
            decoding
        :param reuse_block_hash: use True if you wish to make the query using the last-used block hash. Do not mark True
            if supplying a block_hash

        :return: QueryMapResult object
        """
        params = params or []
        block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
        if block_hash:
            self.last_block_hash = block_hash
        runtime = await self.init_runtime(block_hash=block_hash)

        metadata_pallet = runtime.metadata.get_metadata_pallet(module)
        if not metadata_pallet:
            raise ValueError(f'Pallet "{module}" not found')
        storage_item = metadata_pallet.get_storage_function(storage_function)

        if not metadata_pallet or not storage_item:
            raise ValueError(
                f'Storage function "{module}.{storage_function}" not found'
            )

        value_type = storage_item.get_value_type_string()
        param_types = storage_item.get_params_type_string()
        key_hashers = storage_item.get_param_hashers()

        # Check MapType conditions
        if len(param_types) == 0:
            raise ValueError("Given storage function is not a map")
        # At least one map key must remain unspecified — that is what we
        # iterate over.
        if len(params) > len(param_types) - 1:
            raise ValueError(
                f"Storage function map can accept max {len(param_types) - 1} parameters, {len(params)} given"
            )

        # Generate storage key prefix
        storage_key = StorageKey.create_from_storage_function(
            module,
            storage_item.value["name"],
            params,
            runtime_config=runtime.runtime_config,
            metadata=runtime.metadata,
        )
        prefix = storage_key.to_hex()

        if not start_key:
            start_key = prefix

        # Make sure if the max result is smaller than the page size, adjust the page size
        if max_results is not None and max_results < page_size:
            page_size = max_results

        # Retrieve storage keys
        response = await self.rpc_request(
            method="state_getKeysPaged",
            params=[prefix, page_size, start_key, block_hash],
        )

        if "error" in response:
            raise SubstrateRequestException(response["error"]["message"])

        result_keys = response.get("result")

        result = []
        last_key = None

        def concat_hash_len(key_hasher: str) -> int:
            """
            Helper function to avoid if statements
            """
            # Concat hashers prepend a fixed-size digest before the raw key;
            # these lengths are how many bytes to skip to reach the key value.
            if key_hasher == "Blake2_128Concat":
                return 16
            elif key_hasher == "Twox64Concat":
                return 8
            elif key_hasher == "Identity":
                return 0
            else:
                raise ValueError("Unsupported hash type")

        if len(result_keys) > 0:
            # Remember where this page ended so the next page can resume.
            last_key = result_keys[-1]

            # Retrieve corresponding value
            response = await self.rpc_request(
                method="state_queryStorageAt", params=[result_keys, block_hash]
            )

            if "error" in response:
                raise SubstrateRequestException(response["error"]["message"])

            for result_group in response["result"]:
                for item in result_group["changes"]:
                    try:
                        # Determine type string
                        # Each remaining map key decodes as (hasher-prefix
                        # bytes, key value) pairs.
                        key_type_string = []
                        for n in range(len(params), len(param_types)):
                            key_type_string.append(
                                f"[u8; {concat_hash_len(key_hashers[n])}]"
                            )
                            key_type_string.append(param_types[n])

                        item_key_obj = await self.decode_scale(
                            type_string=f"({', '.join(key_type_string)})",
                            scale_bytes=bytes.fromhex(item[0][len(prefix) :]),
                            return_scale_obj=True,
                        )

                        # strip key_hashers to use as item key
                        # NOTE(review): the step-2 slice picks the key values
                        # from the interleaved (prefix, key) pairs; verify the
                        # upper bound `len(param_types) + 1` against maps with
                        # multiple remaining keys.
                        if len(param_types) - len(params) == 1:
                            item_key = item_key_obj[1]
                        else:
                            item_key = tuple(
                                item_key_obj[key + 1]
                                for key in range(len(params), len(param_types) + 1, 2)
                            )

                    except Exception as _:
                        if not ignore_decoding_errors:
                            raise
                        item_key = None

                    try:
                        # Values usually arrive as "0x..." hex; tolerate a
                        # missing prefix.
                        try:
                            item_bytes = bytes.fromhex(item[1][2:])
                        except ValueError:
                            item_bytes = bytes.fromhex(item[1])

                        item_value = await self.decode_scale(
                            type_string=value_type,
                            scale_bytes=item_bytes,
                            return_scale_obj=True,
                        )
                    except Exception as _:
                        if not ignore_decoding_errors:
                            raise
                        item_value = None

                    result.append([item_key, item_value])

        return QueryMapResult(
            records=result,
            page_size=page_size,
            module=module,
            storage_function=storage_function,
            params=params,
            block_hash=block_hash,
            substrate=self,
            last_key=last_key,
            max_results=max_results,
            ignore_decoding_errors=ignore_decoding_errors,
        )
+
    async def submit_extrinsic(
        self,
        extrinsic: GenericExtrinsic,
        wait_for_inclusion: bool = False,
        wait_for_finalization: bool = False,
    ) -> "ExtrinsicReceipt":
        """
        Submit an extrinsic to the connected node, with the possibility to wait until the extrinsic is included
        in a block and/or the block is finalized. The receipt returned provided information about the block and
        triggered events

        Parameters
        ----------
        extrinsic: Extrinsic The extrinsic to be sent to the network
        wait_for_inclusion: wait until extrinsic is included in a block (only works for websocket connections)
        wait_for_finalization: wait until extrinsic is finalized (only works for websocket connections)

        Returns
        -------
        ExtrinsicReceipt

        """

        # Check requirements
        if not isinstance(extrinsic, GenericExtrinsic):
            raise TypeError("'extrinsic' must be of type Extrinsics")

        async def result_handler(message: dict, subscription_id) -> tuple[dict, bool]:
            """
            Result handler function passed as an arg to _make_rpc_request as the result_handler
            to handle the results of the extrinsic rpc call, which are multipart, and require
            subscribing to the message

            :param message: message received from the rpc call
            :param subscription_id: subscription id received from the initial rpc call for the subscription

            :returns: tuple containing the dict of the block info for the subscription, and bool for whether
                the subscription is completed.
            """
            # Check if extrinsic is included and finalized
            if "params" in message and isinstance(message["params"]["result"], dict):
                # Convert result enum to lower for backwards compatibility
                message_result = {
                    k.lower(): v for k, v in message["params"]["result"].items()
                }

                if "finalized" in message_result and wait_for_finalization:
                    # Created as a task because we don't actually care about the result
                    self._forgettable_task = asyncio.create_task(
                        self.rpc_request("author_unwatchExtrinsic", [subscription_id])
                    )
                    return {
                        "block_hash": message_result["finalized"],
                        "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
                        "finalized": True,
                    }, True
                elif (
                    "inblock" in message_result
                    and wait_for_inclusion
                    and not wait_for_finalization
                ):
                    # Created as a task because we don't actually care about the result
                    self._forgettable_task = asyncio.create_task(
                        self.rpc_request("author_unwatchExtrinsic", [subscription_id])
                    )
                    return {
                        "block_hash": message_result["inblock"],
                        "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
                        "finalized": False,
                    }, True
            # Not the status we are waiting for yet — keep the subscription open.
            return message, False

        if wait_for_inclusion or wait_for_finalization:
            # Subscription path: watch the extrinsic until the requested status.
            responses = (
                await self._make_rpc_request(
                    [
                        self.make_payload(
                            "rpc_request",
                            "author_submitAndWatchExtrinsic",
                            [str(extrinsic.data)],
                        )
                    ],
                    result_handler=result_handler,
                )
            )["rpc_request"]
            response = next(
                (r for r in responses if "block_hash" in r and "extrinsic_hash" in r),
                None,
            )

            if not response:
                raise SubstrateRequestException(responses)

            # Also, this will be a multipart response, so maybe should change to everything after the first response?
            # The following code implies this will be a single response after the initial subscription id.
            result = ExtrinsicReceipt(
                substrate=self,
                extrinsic_hash=response["extrinsic_hash"],
                block_hash=response["block_hash"],
                finalized=response["finalized"],
            )

        else:
            # Fire-and-forget path: submit without watching.
            response = await self.rpc_request(
                "author_submitExtrinsic", [str(extrinsic.data)]
            )

            if "result" not in response:
                raise SubstrateRequestException(response.get("error"))

            result = ExtrinsicReceipt(substrate=self, extrinsic_hash=response["result"])

        return result
+
    async def get_metadata_call_function(
        self,
        module_name: str,
        call_function_name: str,
        block_hash: Optional[str] = None,
    ) -> Optional[list]:
        """
        Retrieves the call-function metadata entry named ``call_function_name``
        on pallet ``module_name``, active at ``block_hash`` (or the chain tip
        when omitted).

        NOTE(review): despite the ``Optional[list]`` annotation, this returns a
        single matching call object (or ``None``), not a list — confirm the
        intended annotation with callers.

        :param module_name: name of the module (pallet)
        :param call_function_name: name of the call function
        :param block_hash: optional block hash

        :return: the matching call-function metadata entry, or ``None``
        """
        runtime = await self.init_runtime(block_hash=block_hash)

        for pallet in runtime.metadata.pallets:
            if pallet.name == module_name and pallet.calls:
                for call in pallet.calls:
                    if call.name == call_function_name:
                        return call
        return None
+
+ async def get_block_number(self, block_hash: Optional[str] = None) -> int:
+ """Async version of `substrateinterface.base.get_block_number` method."""
+ response = await self.rpc_request("chain_getHeader", [block_hash])
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ elif "result" in response:
+ if response["result"]:
+ return int(response["result"]["number"], 16)
+
+ async def close(self):
+ """
+ Closes the substrate connection, and the websocket connection.
+ """
+ try:
+ await self.ws.shutdown()
+ except AttributeError:
+ pass
diff --git a/bittensor/utils/delegates_details.py b/bittensor/utils/delegates_details.py
new file mode 100644
index 0000000000..88a5633e76
--- /dev/null
+++ b/bittensor/utils/delegates_details.py
@@ -0,0 +1,43 @@
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass
+class DelegatesDetails:
+ display: str
+ additional: list[tuple[str, str]]
+ web: str
+ legal: Optional[str] = None
+ riot: Optional[str] = None
+ email: Optional[str] = None
+ pgp_fingerprint: Optional[str] = None
+ image: Optional[str] = None
+ twitter: Optional[str] = None
+
+ @classmethod
+ def from_chain_data(cls, data: dict[str, Any]) -> "DelegatesDetails":
+ def decode(key: str, default: Optional[str] = ""):
+ try:
+ if isinstance(data.get(key), dict):
+ value = next(data.get(key).values())
+ return bytes(value[0]).decode("utf-8")
+ elif isinstance(data.get(key), int):
+ return data.get(key)
+ elif isinstance(data.get(key), tuple):
+ return bytes(data.get(key)[0]).decode("utf-8")
+ else:
+ return default
+ except (UnicodeDecodeError, TypeError):
+ return default
+
+ return cls(
+ display=decode("display"),
+ additional=decode("additional", []),
+ web=decode("web"),
+ legal=decode("legal"),
+ riot=decode("riot"),
+ email=decode("email"),
+ pgp_fingerprint=decode("pgp_fingerprint", None),
+ image=decode("image"),
+ twitter=decode("twitter"),
+ )
diff --git a/bittensor/utils/deprecated.py b/bittensor/utils/deprecated.py
index 146e8395d0..124c0daac9 100644
--- a/bittensor/utils/deprecated.py
+++ b/bittensor/utils/deprecated.py
@@ -45,6 +45,7 @@
from bittensor_wallet import Keypair # noqa: F401
from bittensor.core import settings
+from bittensor.core.async_subtensor import AsyncSubtensor
from bittensor.core.axon import Axon
from bittensor.core.chain_data import ( # noqa: F401
AxonInfo,
@@ -116,6 +117,7 @@
from bittensor.utils.subnets import SubnetsAPI # noqa: F401
# Backwards compatibility with previous bittensor versions.
+async_subtensor = AsyncSubtensor
axon = Axon
config = Config
dendrite = Dendrite
diff --git a/requirements/prod.txt b/requirements/prod.txt
index 17c73f6f25..bb8e243948 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -1,6 +1,8 @@
wheel
setuptools~=70.0.0
aiohttp~=3.9
+async-property==0.2.2
+backoff
bittensor-cli
bt-decode
colorama~=0.4.6
diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py
index 59170c9512..4a7b2ccf62 100644
--- a/tests/e2e_tests/conftest.py
+++ b/tests/e2e_tests/conftest.py
@@ -8,7 +8,7 @@
import pytest
from substrateinterface import SubstrateInterface
-from bittensor import logging
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.e2e_test_utils import (
clone_or_update_templates,
install_templates,
diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py
index 853719f85d..b5d18c5729 100644
--- a/tests/e2e_tests/test_axon.py
+++ b/tests/e2e_tests/test_axon.py
@@ -4,8 +4,8 @@
import pytest
import bittensor
-from bittensor import logging
from bittensor.utils import networking
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet
from tests.e2e_tests.utils.e2e_test_utils import (
setup_wallet,
diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py
index ca9b0a0a2c..962a061a9a 100644
--- a/tests/e2e_tests/test_commit_weights.py
+++ b/tests/e2e_tests/test_commit_weights.py
@@ -3,8 +3,9 @@
import numpy as np
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
@@ -48,7 +49,7 @@ async def test_commit_and_reveal_weights(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Enable commit_reveal on the subnet
assert sudo_set_hyperparameter_bool(
@@ -59,7 +60,7 @@ async def test_commit_and_reveal_weights(local_chain):
netuid,
), "Unable to enable commit reveal on the subnet"
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert subtensor.get_subnet_hyperparameters(
netuid=netuid
).commit_reveal_weights_enabled, "Failed to enable commit/reveal"
@@ -73,7 +74,7 @@ async def test_commit_and_reveal_weights(local_chain):
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(
netuid=netuid
@@ -92,7 +93,7 @@ async def test_commit_and_reveal_weights(local_chain):
call_params={"netuid": netuid, "weights_set_rate_limit": "0"},
return_error_message=True,
)
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0
), "Failed to set weights_rate_limit"
diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py
index e075326ca5..279e151346 100644
--- a/tests/e2e_tests/test_dendrite.py
+++ b/tests/e2e_tests/test_dendrite.py
@@ -3,20 +3,21 @@
import pytest
-import bittensor
-from bittensor import logging, Subtensor
-
-from tests.e2e_tests.utils.e2e_test_utils import (
- setup_wallet,
- template_path,
- templates_repo,
-)
+from bittensor.core.metagraph import Metagraph
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
add_stake,
wait_epoch,
)
+from tests.e2e_tests.utils.e2e_test_utils import (
+ setup_wallet,
+ template_path,
+ templates_repo,
+)
@pytest.mark.asyncio
@@ -56,7 +57,7 @@ async def test_dendrite(local_chain):
local_chain, bob_wallet, netuid
), f"Neuron wasn't registered to subnet {netuid}"
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
subtensor = Subtensor(network="ws://localhost:9945")
# Assert one neuron is Bob
@@ -69,10 +70,10 @@ async def test_dendrite(local_chain):
assert neuron.stake.tao == 0
# Stake to become to top neuron after the first epoch
- assert add_stake(local_chain, bob_wallet, bittensor.Balance.from_tao(10_000))
+ assert add_stake(local_chain, bob_wallet, Balance.from_tao(10_000))
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
old_neuron = metagraph.neurons[0]
# Assert stake is 10000
@@ -121,7 +122,7 @@ async def test_dendrite(local_chain):
await wait_epoch(subtensor, netuid=netuid)
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
# Refresh validator neuron
updated_neuron = metagraph.neurons[0]
diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py
index d73162fbb4..4725704f61 100644
--- a/tests/e2e_tests/test_liquid_alpha.py
+++ b/tests/e2e_tests/test_liquid_alpha.py
@@ -1,5 +1,6 @@
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -49,10 +50,10 @@ def test_liquid_alpha(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Assert liquid alpha is disabled
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).liquid_alpha_enabled
is False
@@ -118,7 +119,7 @@ def test_liquid_alpha(local_chain):
alpha_high_too_high = u16_max + 1 # One more than the max acceptable value
call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_high}")
try:
- result, error_message = sudo_set_hyperparameter_values(
+ sudo_set_hyperparameter_values(
local_chain,
alice_wallet,
call_function="sudo_set_alpha_values",
diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py
index ff16dde369..8999b30358 100644
--- a/tests/e2e_tests/test_metagraph.py
+++ b/tests/e2e_tests/test_metagraph.py
@@ -1,7 +1,8 @@
import time
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -64,7 +65,7 @@ def test_metagraph(local_chain):
).serialize(), "Subnet wasn't created successfully"
# Initialize metagraph
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
metagraph = subtensor.metagraph(netuid=1)
# Assert metagraph is empty
@@ -129,17 +130,17 @@ def test_metagraph(local_chain):
# Test staking with low balance
assert not add_stake(
- local_chain, dave_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, dave_wallet, Balance.from_tao(10_000)
), "Low balance stake should fail"
# Add stake by Bob
assert add_stake(
- local_chain, bob_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, bob_wallet, Balance.from_tao(10_000)
), "Failed to add stake for Bob"
# Assert stake is added after updating metagraph
metagraph.sync(subtensor=subtensor)
- assert metagraph.neurons[0].stake == bittensor.Balance.from_tao(
+ assert metagraph.neurons[0].stake == Balance.from_tao(
10_000
), "Bob's stake not updated in metagraph"
diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py
index 32d0f6e14d..ffa7b716ee 100644
--- a/tests/e2e_tests/test_subtensor_functions.py
+++ b/tests/e2e_tests/test_subtensor_functions.py
@@ -3,8 +3,8 @@
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
@@ -31,7 +31,7 @@ async def test_subtensor_extrinsics(local_chain):
AssertionError: If any of the checks or verifications fail
"""
netuid = 1
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Subnets 0 and 3 are bootstrapped from the start
assert subtensor.get_subnets() == [0, 3]
@@ -139,7 +139,7 @@ async def test_subtensor_extrinsics(local_chain):
await asyncio.sleep(
5
) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Verify neuron info is updated after running as a validator
neuron_info = subtensor.get_neuron_for_pubkey_and_subnet(
diff --git a/tests/e2e_tests/utils/chain_interactions.py b/tests/e2e_tests/utils/chain_interactions.py
index aad53812c8..20e4a65dea 100644
--- a/tests/e2e_tests/utils/chain_interactions.py
+++ b/tests/e2e_tests/utils/chain_interactions.py
@@ -6,7 +6,7 @@
import asyncio
from typing import Union, Optional, TYPE_CHECKING
-from bittensor import logging
+from bittensor.utils.btlogging import logging
# for typing purposes
if TYPE_CHECKING:
diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py
index 8a2480a9b9..8ff60d2de6 100644
--- a/tests/unit_tests/extrinsics/test_init.py
+++ b/tests/unit_tests/extrinsics/test_init.py
@@ -1,9 +1,10 @@
"""Tests for bittensor/extrinsics/__ini__ module."""
from bittensor.utils import format_error_message
+from tests.unit_tests.extrinsics.test_commit_weights import subtensor
-def test_format_error_message_with_right_error_message():
+def test_format_error_message_with_right_error_message(mocker):
"""Verify that error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {
@@ -13,7 +14,7 @@ def test_format_error_message_with_right_error_message():
}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -22,13 +23,13 @@ def test_format_error_message_with_right_error_message():
assert "Some error description." in result
-def test_format_error_message_with_empty_error_message():
+def test_format_error_message_with_empty_error_message(mocker):
"""Verify that empty error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -37,13 +38,13 @@ def test_format_error_message_with_empty_error_message():
assert "Unknown Description" in result
-def test_format_error_message_with_wrong_type_error_message():
+def test_format_error_message_with_wrong_type_error_message(mocker):
"""Verify that error message from extrinsic response with wrong type parses correctly."""
# Prep
fake_error_message = None
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions