From a19ade6617693178942315bf29d57eb650f35293 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 20 Apr 2023 13:04:29 +0200
Subject: [PATCH 1/2] Fix docs text inversion

---
 src/diffusers/loaders.py         | 4 ++--
 tests/models/test_lora_layers.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 82c1ac61ca9e..875fe81b7ddd 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -511,7 +511,7 @@ def load_textual_inversion(
         model_id = "runwayml/stable-diffusion-v1-5"
         pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
 
-        pipe.load_textual_inversion("./charturnerv2.pt")
+        pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
 
         prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."
 
@@ -848,7 +848,7 @@ def _modify_text_encoder(self, attn_processors: Dict[str, LoRAAttnProcessor]):
         """
         # Loop over the original attention modules.
         for name, _ in self.text_encoder.named_modules():
-            if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+            if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
                 # Retrieve the module and its corresponding LoRA processor.
                 module = self.text_encoder.get_submodule(name)
                 # Construct a new function that performs the LoRA merging. We will monkey patch
diff --git a/tests/models/test_lora_layers.py b/tests/models/test_lora_layers.py
index 6f75902d388f..6f1e85e15558 100644
--- a/tests/models/test_lora_layers.py
+++ b/tests/models/test_lora_layers.py
@@ -46,7 +46,7 @@ def create_unet_lora_layers(unet: nn.Module):
 def create_text_encoder_lora_layers(text_encoder: nn.Module):
     text_lora_attn_procs = {}
     for name, module in text_encoder.named_modules():
-        if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+        if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
             text_lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=module.out_features, cross_attention_dim=None)
     text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
     return text_encoder_lora_layers

From a2dc95c26bcfea67592ac756d0d3f9e774e5efc6 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 20 Apr 2023 12:05:29 +0100
Subject: [PATCH 2/2] Apply suggestions from code review

---
 src/diffusers/loaders.py         | 2 +-
 tests/models/test_lora_layers.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 875fe81b7ddd..da8bc03bf2e5 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -848,7 +848,7 @@ def _modify_text_encoder(self, attn_processors: Dict[str, LoRAAttnProcessor]):
         """
         # Loop over the original attention modules.
         for name, _ in self.text_encoder.named_modules():
-            if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
+            if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
                 # Retrieve the module and its corresponding LoRA processor.
                 module = self.text_encoder.get_submodule(name)
                 # Construct a new function that performs the LoRA merging. We will monkey patch
diff --git a/tests/models/test_lora_layers.py b/tests/models/test_lora_layers.py
index 6f1e85e15558..6f75902d388f 100644
--- a/tests/models/test_lora_layers.py
+++ b/tests/models/test_lora_layers.py
@@ -46,7 +46,7 @@ def create_unet_lora_layers(unet: nn.Module):
 def create_text_encoder_lora_layers(text_encoder: nn.Module):
     text_lora_attn_procs = {}
     for name, module in text_encoder.named_modules():
-        if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
+        if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
             text_lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=module.out_features, cross_attention_dim=None)
     text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
     return text_encoder_lora_layers
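For reference, the docstring fix in the first commit amounts to passing an explicit `token` when loading the embedding, so the placeholder used in the prompt matches the loaded vector. A minimal sketch of that usage, assuming the `charturnerv2.pt` file from the example is available locally and a CUDA device is present; the generation and save calls at the end are illustrative rather than taken from the patch:

```python
# Sketch of the corrected docstring example; assumes ./charturnerv2.pt exists
# locally and that a CUDA device is available.
import torch
from diffusers import StableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# Register the textual inversion embedding under an explicit token so the
# prompt below can reference it as "charturnerv2".
pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")

prompt = (
    "charturnerv2, multiple views of the same character in the same outfit, "
    "a character turnaround of a woman wearing a black jacket and red shirt, "
    "best quality, intricate details."
)

# Illustrative generation step (not part of the diff above).
image = pipe(prompt, num_inference_steps=50).images[0]
image.save("character.png")
```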
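The second commit only reverts the comprehension style inside `any(...)`; both forms select the same text encoder modules. A self-contained sketch of that matching logic, with `TEXT_ENCODER_TARGET_MODULES` stubbed with assumed values rather than imported from `diffusers`:

```python
# Standalone sketch of the module-name filtering touched by both commits.
# TEXT_ENCODER_TARGET_MODULES is stubbed here for illustration only.
TEXT_ENCODER_TARGET_MODULES = ["q_proj", "k_proj", "v_proj", "out_proj"]

module_names = [
    "text_model.encoder.layers.0.self_attn.q_proj",
    "text_model.encoder.layers.0.self_attn.out_proj",
    "text_model.encoder.layers.0.mlp.fc1",  # not a target, should be skipped
]

for name in module_names:
    # The list-comprehension form (kept after review) and the generator form are
    # functionally equivalent; the generator just lets any() stop early without
    # building the intermediate list.
    list_form = any([x in name for x in TEXT_ENCODER_TARGET_MODULES])
    gen_form = any(x in name for x in TEXT_ENCODER_TARGET_MODULES)
    assert list_form == gen_form
    if list_form:
        print(f"would attach a LoRA attention processor to: {name}")
```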