diff --git a/openvalidators/reward/diversity.py b/openvalidators/reward/diversity.py index a7629a7..52eebd5 100644 --- a/openvalidators/reward/diversity.py +++ b/openvalidators/reward/diversity.py @@ -67,6 +67,7 @@ def get_embeddings( self, sentences: List[str] ) -> "torch.FloatTensor": Embedding for the message. """ # Tokenizing sentences + encoded_input = self.tokenizer( sentences, padding=True, @@ -87,6 +88,10 @@ def get_embeddings( self, sentences: List[str] ) -> "torch.FloatTensor": def get_rewards( self, prompt: str, completions: List[str], name: str ) -> torch.FloatTensor: + # Check if completions are empty, return an empty tensor if so + if len(completions) == 0: + return torch.tensor([]) + # Get embeddings for all completions. embeddings = self.get_embeddings( completions ) diff --git a/openvalidators/utils.py b/openvalidators/utils.py index 341fbde..e712873 100644 --- a/openvalidators/utils.py +++ b/openvalidators/utils.py @@ -32,7 +32,11 @@ def should_reinit_wandb(self): def init_wandb(self, reinit=False): """Starts a new wandb run.""" - tags = [self.wallet.hotkey.ss58_address, openvalidators.__version__, str(openvalidators.__spec_version__)] + tags = [self.wallet.hotkey.ss58_address, + openvalidators.__version__, + str(openvalidators.__spec_version__), + f'netuid_{self.metagraph.netuid}'] + if self.config.mock: tags.append("mock") if self.config.neuron.use_custom_gating_model: