From 1632c98b6bfde7bee0da36d9e797387c4b461602 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 14:53:35 +0200
Subject: [PATCH 1/7] fix typo

---
 src/diffusers/models/controlnet_xs.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/controlnet_xs.py b/src/diffusers/models/controlnet_xs.py
index 0fa21755f09c..f676a70f060a 100644
--- a/src/diffusers/models/controlnet_xs.py
+++ b/src/diffusers/models/controlnet_xs.py
@@ -285,7 +285,7 @@ class ControlNetXSAdapter(ModelMixin, ConfigMixin):
         upcast_attention (`bool`, defaults to `True`):
             Whether the attention computation should always be upcasted.
         max_norm_num_groups (`int`, defaults to 32):
-            Maximum number of groups in group normal. The actual number will the the largest divisor of the respective
+            Maximum number of groups in group normal. The actual number will be the largest divisor of the respective
             channels, that is <= max_norm_num_groups.
     """

From 99e4aba34b3a50bd07a662e571d1ca4647c6817c Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 14:57:10 +0200
Subject: [PATCH 2/7] fix typos

---
 src/diffusers/pipelines/pag/pag_utils.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/diffusers/pipelines/pag/pag_utils.py b/src/diffusers/pipelines/pag/pag_utils.py
index 7c9bb2d098d2..71a4abfe6221 100644
--- a/src/diffusers/pipelines/pag/pag_utils.py
+++ b/src/diffusers/pipelines/pag/pag_utils.py
@@ -84,7 +84,7 @@ def get_block_type(module_name):
         def get_block_index(module_name):
             r"""
             Get the block index from the module name. Can be "block_0", "block_1", ... If there is only one block (e.g.
-            mid_block) and index is ommited from the name, it will be "block_0".
+            mid_block) and index is omitted from the name, it will be "block_0".
             """
             # down_blocks.1.attentions.0.transformer_blocks.0.attn1 -> "block_1"
             # mid_block.attentions.0.transformer_blocks.0.attn1 -> "block_0"
@@ -223,7 +223,7 @@ def _prepare_perturbed_attention_guidance(self, cond, uncond, do_classifier_free
     def set_pag_applied_layers(self, pag_applied_layers):
         r"""
-        set the the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
+        set the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
         """

         if not isinstance(pag_applied_layers, list):
@@ -315,7 +315,7 @@ def is_self_attn(module_name):
         def get_block_index(module_name):
             r"""
             Get the block index from the module name. can be "block_0", "block_1", ... If there is only one block (e.g.
-            mid_block) and index is ommited from the name, it will be "block_0".
+            mid_block) and index is omitted from the name, it will be "block_0".
             """
             # transformer_blocks.23.attn -> "23"
             return module_name.split(".")[1]
@@ -339,7 +339,7 @@ def get_block_index(module_name):
     # Copied from diffusers.pipelines.pag.pag_utils.PAGMixin.set_pag_applied_layers
     def set_pag_applied_layers(self, pag_applied_layers):
         r"""
-        set the the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
+        set the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
         """

         if not isinstance(pag_applied_layers, list):

From e1266d5ca5f2a66cc66f46266307778fd433fba5 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 14:58:03 +0200
Subject: [PATCH 3/7] fix typos

---
 examples/research_projects/sdxl_flax/sdxl_single_aot.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/research_projects/sdxl_flax/sdxl_single_aot.py b/examples/research_projects/sdxl_flax/sdxl_single_aot.py
index 58447fd86daf..08bd13902aa9 100644
--- a/examples/research_projects/sdxl_flax/sdxl_single_aot.py
+++ b/examples/research_projects/sdxl_flax/sdxl_single_aot.py
@@ -18,7 +18,7 @@ NUM_DEVICES = jax.device_count()

 # 1. Let's start by downloading the model and loading it into our pipeline class
-# Adhering to JAX's functional approach, the model's parameters are returned seperatetely and
+# Adhering to JAX's functional approach, the model's parameters are returned separately and
 # will have to be passed to the pipeline during inference
 pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
 )
@@ -69,7 +69,7 @@ def replicate_all(prompt_ids, neg_prompt_ids, seed):
 # to the function and tell JAX which are static arguments, that is, arguments that
 # are known at compile time and won't change. In our case, it is num_inference_steps,
 # height, width and return_latents.
-# Once the function is compiled, these parameters are ommited from future calls and
+# Once the function is compiled, these parameters are omitted from future calls and
 # cannot be changed without modifying the code and recompiling.
 def aot_compile(
     prompt=default_prompt,

From f87f77b22fb9fa0d4d97535e9b26501c8db6c2a3 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 14:59:41 +0200
Subject: [PATCH 4/7] fix typos

---
 docs/source/en/training/instructpix2pix.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/source/en/training/instructpix2pix.md b/docs/source/en/training/instructpix2pix.md
index 3f797ced497d..3a651e5abd2d 100644
--- a/docs/source/en/training/instructpix2pix.md
+++ b/docs/source/en/training/instructpix2pix.md
@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.

 [InstructPix2Pix](https://hf.co/papers/2211.09800) is a Stable Diffusion model trained to edit images from human-provided instructions. For example, your prompt can be "turn the clouds rainy" and the model will edit the input image accordingly. This model is conditioned on the text prompt (or editing instruction) and the input image.

-This guide will explore the [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.
+This guide will explore the [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) training script to help you become familiar with it, and how you can adapt it for your own use case.

 Before running the script, make sure you install the library from source:
@@ -117,7 +117,7 @@ optimizer = optimizer_cls(
 )
 ```

-Next, the edited images and and edit instructions are [preprocessed](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624) and [tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24). It is important the same image transformations are applied to the original and edited images.
+Next, the edited images and edit instructions are [preprocessed](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624) and [tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24). It is important the same image transformations are applied to the original and edited images.

 ```py
 def preprocess_train(examples):
@@ -249,4 +249,4 @@ The SDXL training script is discussed in more detail in the [SDXL training](sdxl

 Congratulations on training your own InstructPix2Pix model! 🥳

 To learn more about the model, it may be helpful to:
-- Read the [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) blog post to learn more about some experiments we've done with InstructPix2Pix, dataset preparation, and results for different instructions.
\ No newline at end of file
+- Read the [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) blog post to learn more about some experiments we've done with InstructPix2Pix, dataset preparation, and results for different instructions.

From 33330ec2250769b9d158e57db5e19792e04d89f5 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 15:01:34 +0200
Subject: [PATCH 5/7] fix typo

---
 docs/source/en/using-diffusers/callback.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/en/using-diffusers/callback.md b/docs/source/en/using-diffusers/callback.md
index 2ed74ab80dbf..ce4c6d1b98c8 100644
--- a/docs/source/en/using-diffusers/callback.md
+++ b/docs/source/en/using-diffusers/callback.md
@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.

 # Pipeline callbacks

-The denoising loop of a pipeline can be modified with custom defined functions using the `callback_on_step_end` parameter. The callback function is executed at the end of each step, and modifies the pipeline attributes and variables for the next step. This is really useful for *dynamically* adjusting certain pipeline attributes or modifying tensor variables. This versatility allows for interesting use-cases such as changing the prompt embeddings at each timestep, assigning different weights to the prompt embeddings, and editing the guidance scale. With callbacks, you can implement new features without modifying the underlying code!
+The denoising loop of a pipeline can be modified with custom defined functions using the `callback_on_step_end` parameter. The callback function is executed at the end of each step, and modifies the pipeline attributes and variables for the next step. This is really useful for *dynamically* adjusting certain pipeline attributes or modifying tensor variables. This versatility allows for interesting use cases such as changing the prompt embeddings at each timestep, assigning different weights to the prompt embeddings, and editing the guidance scale. With callbacks, you can implement new features without modifying the underlying code!

 > [!TIP]
 > 🤗 Diffusers currently only supports `callback_on_step_end`, but feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you have a cool use-case and require a callback function with a different execution point!
@@ -75,7 +75,7 @@ out.images[0].save("official_callback.png")
     without SDXLCFGCutoffCallback
-    generated image of a a sports car at the road with cfg callback
+    generated image of a sports car at the road with cfg callback
     with SDXLCFGCutoffCallback

From 6c3fa99344939fd1491dadc7a3fdab5cc9a56365 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Sun, 4 Aug 2024 15:04:41 +0200
Subject: [PATCH 6/7] fix typo

---
 docs/source/en/tutorials/using_peft_for_inference.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/en/tutorials/using_peft_for_inference.md b/docs/source/en/tutorials/using_peft_for_inference.md
index c37dd90fa172..907f93d573a0 100644
--- a/docs/source/en/tutorials/using_peft_for_inference.md
+++ b/docs/source/en/tutorials/using_peft_for_inference.md
@@ -34,7 +34,7 @@ pipe_id = "stabilityai/stable-diffusion-xl-base-1.0"
 pipe = DiffusionPipeline.from_pretrained(pipe_id, torch_dtype=torch.float16).to("cuda")
 ```

-Next, load a [CiroN2022/toy-face](https://huggingface.co/CiroN2022/toy-face) adapter with the [`~diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] method. With the 🤗 PEFT integration, you can assign a specific `adapter_name` to the checkpoint, which let's you easily switch between different LoRA checkpoints. Let's call this adapter `"toy"`.
+Next, load a [CiroN2022/toy-face](https://huggingface.co/CiroN2022/toy-face) adapter with the [`~diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] method. With the 🤗 PEFT integration, you can assign a specific `adapter_name` to the checkpoint, which lets you easily switch between different LoRA checkpoints. Let's call this adapter `"toy"`.

 ```python
 pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")

From 91405b63d739b8dfd88222b396beaacfa74aab60 Mon Sep 17 00:00:00 2001
From: omahs <73983677+omahs@users.noreply.github.com>
Date: Mon, 5 Aug 2024 11:27:55 +0200
Subject: [PATCH 7/7] Revert typo fix

---
 src/diffusers/pipelines/pag/pag_utils.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/diffusers/pipelines/pag/pag_utils.py b/src/diffusers/pipelines/pag/pag_utils.py
index 71a4abfe6221..7c9bb2d098d2 100644
--- a/src/diffusers/pipelines/pag/pag_utils.py
+++ b/src/diffusers/pipelines/pag/pag_utils.py
@@ -84,7 +84,7 @@ def get_block_type(module_name):
         def get_block_index(module_name):
             r"""
             Get the block index from the module name. Can be "block_0", "block_1", ... If there is only one block (e.g.
-            mid_block) and index is omitted from the name, it will be "block_0".
+            mid_block) and index is ommited from the name, it will be "block_0".
             """
             # down_blocks.1.attentions.0.transformer_blocks.0.attn1 -> "block_1"
             # mid_block.attentions.0.transformer_blocks.0.attn1 -> "block_0"
@@ -223,7 +223,7 @@ def _prepare_perturbed_attention_guidance(self, cond, uncond, do_classifier_free
     def set_pag_applied_layers(self, pag_applied_layers):
         r"""
-        set the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
+        set the the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
         """

         if not isinstance(pag_applied_layers, list):
@@ -315,7 +315,7 @@ def is_self_attn(module_name):
         def get_block_index(module_name):
             r"""
             Get the block index from the module name. can be "block_0", "block_1", ... If there is only one block (e.g.
-            mid_block) and index is omitted from the name, it will be "block_0".
+            mid_block) and index is ommited from the name, it will be "block_0".
             """
             # transformer_blocks.23.attn -> "23"
             return module_name.split(".")[1]
@@ -339,7 +339,7 @@ def get_block_index(module_name):
     # Copied from diffusers.pipelines.pag.pag_utils.PAGMixin.set_pag_applied_layers
     def set_pag_applied_layers(self, pag_applied_layers):
         r"""
-        set the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
+        set the the self-attention layers to apply PAG. Raise ValueError if the input is invalid.
         """

         if not isinstance(pag_applied_layers, list):