From da02cee09cc93cdd9d775305fec07100f3ec619f Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Sat, 22 Jul 2023 09:27:31 +0200 Subject: [PATCH 01/24] Add combined pipeline --- .../pipeline_kandinsky2_2_combined.py | 222 ++++++++++++++++++ 1 file changed, 222 insertions(+) create mode 100644 src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py new file mode 100644 index 000000000000..976c55a1d3ce --- /dev/null +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -0,0 +1,222 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Union + +import torch + +from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline + +from ...models import UNet2DConditionModel, VQModel, PriorTransformer +from ...pipelines import DiffusionPipeline +from ...schedulers import UnCLIPScheduler +from transformers import CLIPVisionModelWithProjection, CLIPTextModelWithProjection, CLIPImageProcessor, CLIPTokenizer +from ...pipelines.pipeline_utils import ImagePipelineOutput +from ...schedulers import DDPMScheduler +from ...utils import ( + logging, + randn_tensor, + replace_example_docstring, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") + >>> pipe_prior.to("cuda") + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> zero_image_emb = out.negative_image_embeds + >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images + >>> image[0].save("cat.png") + ``` +""" + + +class KandinskyV22CombinedPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
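+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen CLIP image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen CLIP text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class [`~transformers.CLIPTokenizer`].
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior_prior` to generate image embeddings.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to pre-process images for the CLIP image encoder.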
+ """ + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Pipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False + ) + # TODO offload prior pipeline completetly if necessary + outputs = self.decoder_pipe( + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + width=width, + height=height, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + + return outputs From a89153d8adc83a0b387d2da77acc03dd7f579429 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Sat, 22 Jul 2023 09:38:12 +0200 Subject: [PATCH 02/24] Download readme --- src/diffusers/pipelines/pipeline_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 3d827596d508..70a8b8daaa3f 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -28,6 +28,7 @@ import numpy as np import PIL import torch +from huggingface_hub import ModelCard from huggingface_hub import hf_hub_download, model_info, snapshot_download from packaging import version from requests.exceptions import HTTPError @@ -78,6 +79,7 @@ CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" DUMMY_MODULES_FOLDER = "diffusers.utils" TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" +CONNECTED_PIPES_KEYS = ["prior"] logger = logging.get_logger(__name__) @@ -877,6 +879,7 @@ def from_pretrained(cls, 
pretrained_model_name_or_path: Optional[Union[str, os.P low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False) + load_connected_pipes = kwargs.pop("load_connected_pipes", False) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained @@ -895,6 +898,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P custom_pipeline=custom_pipeline, custom_revision=custom_revision, variant=variant, + load_connected_pipes=load_connected_pipes, **kwargs, ) else: @@ -1233,6 +1237,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: custom_revision = kwargs.pop("custom_revision", None) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) + load_connected_pipes = kwargs.pop("load_connected_pipes", False) if use_safetensors and not is_safetensors_available(): raise ValueError( @@ -1388,6 +1393,9 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: if custom_pipeline is not None and not custom_pipeline.endswith(".py"): user_agent["custom_pipeline"] = custom_pipeline + if load_connected_pipes: + allow_patterns.append(["README.md"]) + # download all allow_patterns - ignore_patterns cached_folder = snapshot_download( pretrained_model_name, From 1cb7cd5ab20489583efcc190ddf492469bd655ee Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Sat, 22 Jul 2023 07:46:31 +0000 Subject: [PATCH 03/24] Upload --- src/diffusers/__init__.py | 1 + src/diffusers/pipelines/kandinsky2_2/__init__.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index c7c7ac6fe859..6350b8246298 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -149,6 +149,7 @@ KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, + KandinskyV22CombinedPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, diff --git a/src/diffusers/pipelines/kandinsky2_2/__init__.py b/src/diffusers/pipelines/kandinsky2_2/__init__.py index 648164b9f1ba..eb51d71ff6b9 100644 --- a/src/diffusers/pipelines/kandinsky2_2/__init__.py +++ b/src/diffusers/pipelines/kandinsky2_2/__init__.py @@ -5,3 +5,4 @@ from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline +from .pipeline_kandinsky2_2_combined import KandinskyV22CombinedPipeline From 13ec934b0230a94318b23d874daeb0c4dfd375cb Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 24 Jul 2023 14:21:02 +0000 Subject: [PATCH 04/24] up --- src/diffusers/pipelines/__init__.py | 1 + .../pipeline_kandinsky2_2_combined.py | 1 + src/diffusers/pipelines/pipeline_utils.py | 21 ++++++++++++------- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 802ae4f5bc94..7f2a01fa5a9b 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -73,6 +73,7 @@ KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, + KandinskyV22CombinedPipeline, ) from .latent_diffusion import LDMTextToImagePipeline from .paint_by_example import PaintByExamplePipeline diff 
--git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index 976c55a1d3ce..59951c398d1c 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -75,6 +75,7 @@ class KandinskyV22CombinedPipeline(DiffusionPipeline): movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. """ + _load_connected_pipes = True def __init__( self, diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 70a8b8daaa3f..6ee6716d03e4 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -477,6 +477,7 @@ class DiffusionPipeline(ConfigMixin): config_name = "model_index.json" _optional_components = [] _exclude_from_cpu_offload = [] + _load_connected_pipes = False def register_modules(self, **kwargs): # import it here to avoid circular import @@ -879,7 +880,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False) - load_connected_pipes = kwargs.pop("load_connected_pipes", False) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained @@ -898,7 +898,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P custom_pipeline=custom_pipeline, custom_revision=custom_revision, variant=variant, - load_connected_pipes=load_connected_pipes, **kwargs, ) else: @@ -1237,7 +1236,6 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: custom_revision = kwargs.pop("custom_revision", None) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) - load_connected_pipes = kwargs.pop("load_connected_pipes", False) if use_safetensors and not is_safetensors_available(): raise ValueError( @@ -1372,6 +1370,10 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: allow_patterns = [ p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components) ] + + if cls._load_connected_pipes: + allow_patterns.append("README.md") + # Don't download index files of forbidden patterns either ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns] @@ -1384,18 +1386,15 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: snapshot_folder = Path(config_file).parent pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) - if pipeline_is_cached and not force_download: + # if pipeline_is_cached and not force_download: # if the pipeline is cached, we can directly return it # else call snapshot_download - return snapshot_folder + # return snapshot_folder user_agent = {"pipeline_class": cls.__name__} if custom_pipeline is not None and not custom_pipeline.endswith(".py"): user_agent["custom_pipeline"] = custom_pipeline - if load_connected_pipes: - allow_patterns.append(["README.md"]) - # download all allow_patterns - ignore_patterns cached_folder = snapshot_download( pretrained_model_name, @@ -1410,6 +1409,12 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: user_agent=user_agent, ) + if 
cls._load_connected_pipes: + modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) + connected_pipes = sum([getattr(modelcard, k, []) for k in CONNECTED_PIPES_KEYS], []) + for connected_pipe_repo_id in connected_pipes: + DiffusionPipeline.download(connected_pipe_repo_id) + return cached_folder @staticmethod From e6d5bc60adf8ddd46fc3de25ca38fe28d5307bf9 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 16:37:10 +0200 Subject: [PATCH 05/24] up --- src/diffusers/__init__.py | 7 +++- src/diffusers/pipelines/__init__.py | 7 +++- src/diffusers/pipelines/auto_pipeline.py | 20 +++++++----- src/diffusers/pipelines/kandinsky/__init__.py | 5 +++ .../kandinsky/pipeline_kandinsky_combined.py | 26 +++++++++++++++ .../pipelines/kandinsky2_2/__init__.py | 6 +++- .../pipeline_kandinsky2_2_combined.py | 24 ++++++++------ src/diffusers/pipelines/pipeline_utils.py | 32 +++++++++---------- 8 files changed, 89 insertions(+), 38 deletions(-) create mode 100644 src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index fdbff82a3d95..2ccc9a8bc22d 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -141,18 +141,23 @@ IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, + KandinskyV22CombinedPipeline, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, - KandinskyV22TextToImagePipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 3d5ebf9abbc5..2bc15de530a4 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -61,20 +61,25 @@ IFSuperResolutionPipeline, ) from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2Img2CombinedPipeline, KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, ) from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22ControlnetPipeline, + KandinskyV22Img2Img2CombinedPipeline, KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, - KandinskyV22TextToImagePipeline, ) from .latent_diffusion import LDMTextToImagePipeline from .paint_by_example import PaintByExamplePipeline diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index c827231ada7d..9f2a8828e930 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -24,8 +24,12 @@ StableDiffusionXLControlNetPipeline, ) from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline -from .kandinsky import KandinskyImg2ImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline -from .kandinsky2_2 import KandinskyV22Img2ImgPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline +from .kandinsky import KandinskyCombinedPipeline, 
KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline +from .kandinsky22 import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, +) from .stable_diffusion import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, @@ -43,8 +47,8 @@ ("stable-diffusion", StableDiffusionPipeline), ("stable-diffusion-xl", StableDiffusionXLPipeline), ("if", IFPipeline), - ("kandinsky", KandinskyPipeline), - ("kandinsky22", KandinskyV22Pipeline), + ("kandinsky", KandinskyCombinedPipeline), + ("kandinsky22", KandinskyV22CombinedPipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetPipeline), ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline), ] @@ -55,8 +59,8 @@ ("stable-diffusion", StableDiffusionImg2ImgPipeline), ("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline), ("if", IFImg2ImgPipeline), - ("kandinsky", KandinskyImg2ImgPipeline), - ("kandinsky22", KandinskyV22Img2ImgPipeline), + ("kandinsky", KandinskyImg2ImgCombinedPipeline), + ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline), ] ) @@ -66,8 +70,8 @@ ("stable-diffusion", StableDiffusionInpaintPipeline), ("stable-diffusion-xl", StableDiffusionXLInpaintPipeline), ("if", IFInpaintingPipeline), - ("kandinsky", KandinskyInpaintPipeline), - ("kandinsky22", KandinskyV22InpaintPipeline), + ("kandinsky", KandinskyInpaintCombinedPipeline), + ("kandinsky22", KandinskyV22InpaintCombinedPipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline), ] ) diff --git a/src/diffusers/pipelines/kandinsky/__init__.py b/src/diffusers/pipelines/kandinsky/__init__.py index 242ff799e529..01e81b77ff15 100644 --- a/src/diffusers/pipelines/kandinsky/__init__.py +++ b/src/diffusers/pipelines/kandinsky/__init__.py @@ -13,6 +13,11 @@ from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline + from .pipeline_kandinsky_combined import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyInpaintCombinedPipeline, + ) from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py new file mode 100644 index 000000000000..2e50c2c40100 --- /dev/null +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -0,0 +1,26 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
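+
+# A combined pipeline bundles a Kandinsky prior pipeline together with one of
+# the decoder pipelines (text-to-image, image-to-image, or inpainting), so a
+# single call maps a prompt all the way to an image. Illustrative usage, once
+# a combined checkpoint is available:
+#
+#     pipe = KandinskyCombinedPipeline.from_pretrained("<combined-checkpoint>")
+#     image = pipe("red cat, 4k photo").images[0]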
+from ...pipelines import DiffusionPipeline + + +class KandinskyCombinedPipeline(DiffusionPipeline): + pass + + +class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): + pass + + +class KandinskyInpaintCombinedPipeline(DiffusionPipeline): + pass diff --git a/src/diffusers/pipelines/kandinsky2_2/__init__.py b/src/diffusers/pipelines/kandinsky2_2/__init__.py index 5ec401d9cab5..5d9a15767ffa 100644 --- a/src/diffusers/pipelines/kandinsky2_2/__init__.py +++ b/src/diffusers/pipelines/kandinsky2_2/__init__.py @@ -1,8 +1,12 @@ from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_combined import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, +) from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline -from .pipeline_kandinsky2_2_combined import KandinskyV22TextToImagePipeline diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index a27d0e9c9a04..22fdb7e5683d 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -15,21 +15,17 @@ from typing import List, Optional, Union import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection -from .pipeline_kandinsky2_2 import KandinskyV22Pipeline -from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline - -from ...models import UNet2DConditionModel, VQModel, PriorTransformer +from ...models import PriorTransformer, UNet2DConditionModel, VQModel from ...pipelines import DiffusionPipeline -from ...schedulers import UnCLIPScheduler -from transformers import CLIPVisionModelWithProjection, CLIPTextModelWithProjection, CLIPImageProcessor, CLIPTokenizer -from ...pipelines.pipeline_utils import ImagePipelineOutput -from ...schedulers import DDPMScheduler +from ...schedulers import DDPMScheduler, UnCLIPScheduler from ...utils import ( logging, - randn_tensor, replace_example_docstring, ) +from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -60,7 +56,7 @@ """ -class KandinskyV22TextToImagePipeline(DiffusionPipeline): +class KandinskyV22CombinedPipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using Kandinsky @@ -222,3 +218,11 @@ def __call__( ) return outputs + + +class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): + pass + + +class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): + pass diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 1d0e2d2f8375..ab300047924c 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -28,8 +28,7 @@ import numpy as np import PIL import torch -from huggingface_hub import ModelCard -from huggingface_hub import hf_hub_download, model_info, snapshot_download +from 
huggingface_hub import ModelCard, hf_hub_download, model_info, snapshot_download from packaging import version from requests.exceptions import HTTPError from tqdm.auto import tqdm @@ -1245,7 +1244,6 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: use_safetensors = is_safetensors_available() allow_pickle = True - pipeline_is_cached = False allow_patterns = None ignore_patterns = None @@ -1384,12 +1382,12 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] snapshot_folder = Path(config_file).parent - pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) + all((snapshot_folder / f).is_file() for f in expected_files) # if pipeline_is_cached and not force_download: - # if the pipeline is cached, we can directly return it - # else call snapshot_download - # return snapshot_folder + # if the pipeline is cached, we can directly return it + # else call snapshot_download + # return snapshot_folder user_agent = {"pipeline_class": cls.__name__} if custom_pipeline is not None and not custom_pipeline.endswith(".py"): @@ -1398,16 +1396,16 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: # download all allow_patterns - ignore_patterns try: cached_folder = snapshot_download( - pretrained_model_name, - cache_dir=cache_dir, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - allow_patterns=allow_patterns, - ignore_patterns=ignore_patterns, - user_agent=user_agent, + pretrained_model_name, + cache_dir=cache_dir, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, ) if cls._load_connected_pipes: From e06c1ca704d5bfabe7c4e15972136a2e19e32487 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 15:02:18 +0000 Subject: [PATCH 06/24] fix final --- src/diffusers/pipelines/pipeline_utils.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 6ee6716d03e4..33a6d6cf7aac 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1066,6 +1066,15 @@ def load_module(name, value): init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) + if pipeline_class._load_connected_pipes: + modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) + connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} + connected_pipes = {prefix: DiffusionPipeline.from_pretrained(repo_id) for prefix, repo_id in connected_pipes.items() if repo_id is not None} + + for prefix, connected_pipe in connected_pipes.items(): + # add connected pipes to `init_kwargs` with _, e.g. "prior_" + init_kwargs.update({"_".join([prefix, name]): component for name, component in connected_pipe.components.items()}) + # 7. 
Potentially add passed objects if expected missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) @@ -1371,7 +1380,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components) ] - if cls._load_connected_pipes: + if pipeline_class._load_connected_pipes: allow_patterns.append("README.md") # Don't download index files of forbidden patterns either @@ -1409,9 +1418,9 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: user_agent=user_agent, ) - if cls._load_connected_pipes: + if pipeline_class._load_connected_pipes: modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) - connected_pipes = sum([getattr(modelcard, k, []) for k in CONNECTED_PIPES_KEYS], []) + connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], []) for connected_pipe_repo_id in connected_pipes: DiffusionPipeline.download(connected_pipe_repo_id) From af58ae525718714dedfddae275df7227b087f784 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 15:44:03 +0000 Subject: [PATCH 07/24] Add enable model cpu offload kandinsky --- .../kandinsky2_2/pipeline_kandinsky2_2.py | 4 ++ .../pipeline_kandinsky2_2_combined.py | 25 +----------- .../pipeline_kandinsky2_2_controlnet.py | 4 ++ ...ipeline_kandinsky2_2_controlnet_img2img.py | 4 ++ .../pipeline_kandinsky2_2_img2img.py | 4 ++ .../pipeline_kandinsky2_2_inpainting.py | 4 ++ .../pipeline_kandinsky2_2_prior.py | 39 ++++++++++++++++++- .../pipeline_kandinsky2_2_prior_emb2emb.py | 38 +++++++++++++++++- 8 files changed, 97 insertions(+), 25 deletions(-) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index e30df96becb3..d36471fd08aa 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -261,6 +261,10 @@ def __call__( # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index 46ee4234b3ec..33681e1476e4 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -30,28 +30,9 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name -EXAMPLE_DOC_STRING = """ +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py - >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline - >>> import torch - - >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") - >>> pipe_prior.to("cuda") - >>> prompt = "red cat, 4k photo" - >>> out = pipe_prior(prompt) - >>> image_emb = out.image_embeds - >>> zero_image_emb = out.negative_image_embeds - >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") - >>> pipe.to("cuda") - >>> image = pipe( - ... image_embeds=image_emb, - ... 
negative_image_embeds=zero_image_emb, - ... height=768, - ... width=768, - ... num_inference_steps=50, - ... ).images - >>> image[0].save("cat.png") ``` """ @@ -124,7 +105,7 @@ def enable_model_cpu_offload(self, gpu_id=0): self.decoder_pipe.enable_model_cpu_offload() @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING ) def __call__( self, prompt: Union[str, List[str]], @@ -203,7 +184,6 @@ def __call__( output_type="pt", return_dict=False, ) - # TODO offload prior pipeline completetly if necessary outputs = self.decoder_pipe( image_embeds=prior_outputs[0], negative_image_embeds=prior_outputs[1], @@ -216,7 +196,6 @@ def __call__( output_type=output_type, return_dict=return_dict, ) - return outputs diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index 8ae251a5676f..feaa51309158 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -316,6 +316,10 @@ def __call__( # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py index 30638a163a8e..4690cf49af14 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py @@ -377,6 +377,10 @@ def __call__( # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py index 423837a40027..1993e44c4c64 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py @@ -341,6 +341,10 @@ def __call__( # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 18a5041f937e..76f1bdbbc5ea 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -474,6 +474,10 @@ def __call__( latents = 
mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents image = self.movq.decode(latents, force_not_quantize=True)["sample"] + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index b6ab2ca3fc23..fa9e17dcb4b0 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -10,6 +10,8 @@ logging, randn_tensor, replace_example_docstring, + is_accelerate_available, + is_accelerate_version, ) from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline @@ -137,7 +139,7 @@ def interpolate( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, negative_prior_prompt: Optional[str] = None, - negative_prompt: Union[str] = "", + negative_prompt: str = "", guidance_scale: float = 4.0, device=None, ): @@ -353,6 +355,35 @@ def _encode_prompt( return prompt_embeds, text_encoder_hidden_states, text_mask + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.prior]: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. 
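+        # Keeping `prior_hook` separately lets `__call__` offload the prior
+        # without touching the image encoder, which is wrapped below as the
+        # `final_offload_hook`.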
+ self.prior_hook = hook + + _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook) + + self.final_offload_hook = hook + @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( @@ -485,9 +516,15 @@ def __call__( # if negative prompt has been defined, we retrieve split the image embedding into two if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() else: image_embeddings, zero_embeds = image_embeddings.chunk(2) + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.prior_hook.offload() + if output_type not in ["pt", "np"]: raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index 75be6e54c93f..afd3003668f2 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -10,6 +10,9 @@ logging, randn_tensor, replace_example_docstring, + is_accelerate_available, + is_accelerate_version, + ) from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline @@ -162,7 +165,7 @@ def interpolate( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, negative_prior_prompt: Optional[str] = None, - negative_prompt: Union[str] = "", + negative_prompt: str = "", guidance_scale: float = 4.0, device=None, ): @@ -392,6 +395,35 @@ def _encode_prompt( return prompt_embeds, text_encoder_hidden_states, text_mask + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.prior]: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. 
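+        # As in `KandinskyV22PriorPipeline`, `prior_hook` lets `__call__`
+        # offload the prior independently of the image encoder, which is
+        # wrapped below as the `final_offload_hook`.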
+ self.prior_hook = hook + + _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook) + + self.final_offload_hook = hook + @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( @@ -549,8 +581,12 @@ def __call__( # if negative prompt has been defined, we retrieve split the image embedding into two if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() else: image_embeddings, zero_embeds = image_embeddings.chunk(2) + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.prior_hook.offload() if output_type not in ["pt", "np"]: raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") From 5e465bf7d39754ecb67bb4144cf61f6e0de9d6d7 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 16:32:09 +0000 Subject: [PATCH 08/24] finish --- .../kandinsky/pipeline_kandinsky_combined.py | 570 +++++++++++++++++- .../kandinsky/pipeline_kandinsky_prior.py | 37 ++ .../pipeline_kandinsky2_2_combined.py | 355 ++++++++++- .../pipeline_kandinsky2_2_prior.py | 4 +- .../pipeline_kandinsky2_2_prior_emb2emb.py | 5 +- 5 files changed, 959 insertions(+), 12 deletions(-) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 416f4c6f03a1..c276e7b11b99 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -11,16 +11,580 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import List, Optional, Union + +import PIL +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, + XLMRobertaTokenizer, +) + +from ...models import PriorTransformer, UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler +from ...utils import ( + replace_example_docstring, +) from ..pipeline_utils import DiffusionPipeline +from .pipeline_kandinsky import KandinskyPipeline +from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline +from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline +from .pipeline_kandinsky_prior import KandinskyPriorPipeline +from .text_encoder import MultilingualCLIP + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + + ``` +""" class KandinskyCombinedPipeline(DiffusionPipeline): - pass + """ + Combined Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
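+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen CLIP image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen CLIP text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class [`~transformers.CLIPTokenizer`].
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior_prior` to generate image embeddings.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to pre-process images for the CLIP image encoder.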
+ """ + + _load_connected_pipes = True + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + outputs = self.decoder_pipe( + prompt=prompt, + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + width=width, + height=height, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + return outputs class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): - pass + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
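+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen CLIP image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen CLIP text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class [`~transformers.CLIPTokenizer`].
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior_prior` to generate image embeddings.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to pre-process images for the CLIP image encoder.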
+ """ + + _load_connected_pipes = True + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyImg2ImgPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + strength: float = 0.3, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
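The `strength` argument described above follows the usual diffusers img2img semantics: the init image is noised part-way into the schedule and only the tail is denoised. A sketch of that arithmetic (illustrative only; the real helper lives in the decoder pipeline):

```py
# strength=0.3 with 25 steps: noise the init image up to step 18, then denoise
# the remaining 7 steps; strength=1.0 would run all 25 steps and ignore `image`.
num_inference_steps, strength = 25, 0.3

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(t_start, num_inference_steps - t_start)  # -> 18 7
```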
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + strength=strength, + width=width, + height=height, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + return outputs class KandinskyInpaintCombinedPipeline(DiffusionPipeline): - pass + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + _load_connected_pipes = True + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyInpaintPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
+ """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while + white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + mask_image=mask_image, + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + width=width, + height=height, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + return outputs diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index bf75eeacfdf3..947b6bfd0f8b 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -24,6 +24,8 @@ from ...schedulers import UnCLIPScheduler from ...utils import ( BaseOutput, + is_accelerate_available, + is_accelerate_version, logging, randn_tensor, replace_example_docstring, @@ -393,6 +395,35 @@ def _encode_prompt( return prompt_embeds, text_encoder_hidden_states, text_mask + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.prior]: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.prior_hook = hook + + _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook) + + self.final_offload_hook = hook + @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( @@ -525,9 +556,15 @@ def __call__( # if negative prompt has been defined, we retrieve split the image embedding into two if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() else: image_embeddings, zero_embeds = image_embeddings.chunk(2) + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.prior_hook.offload() + if output_type not in ["pt", "np"]: raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index 33681e1476e4..c4d9ea5f2020 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -14,6 +14,7 @@ from typing import List, Optional, Union +import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection @@ -25,6 +26,8 @@ ) from ..pipeline_utils import DiffusionPipeline from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline +from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline @@ -33,13 +36,14 @@ TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py + ``` """ class KandinskyV22CombinedPipeline(DiffusionPipeline): """ - Pipeline for text-to-image generation using Kandinsky + Combined Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) @@ -105,7 +109,7 @@ def enable_model_cpu_offload(self, gpu_id=0): self.decoder_pipe.enable_model_cpu_offload() @torch.no_grad() - @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING ) + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], @@ -200,8 +204,351 @@ def __call__( class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): - pass + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. 
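The hook chaining in `enable_model_cpu_offload` above generalizes: each `cpu_offload_with_hook` call returns a hook that, when passed as `prev_module_hook` to the next call, offloads the previous module as soon as the next one starts its forward pass. A self-contained sketch of the same pattern with stand-in modules (requires a CUDA device and accelerate >= 0.17.0):

```py
import torch
from accelerate import cpu_offload_with_hook
from torch import nn

device = torch.device("cuda:0")
# stand-ins for text_encoder / prior / image_encoder
stages = [nn.Linear(4, 4) for _ in range(3)]

hook = None
for stage in stages:
    # each stage is moved to the GPU on forward; the previous stage is offloaded
    _, hook = cpu_offload_with_hook(stage, device, prev_module_hook=hook)

x = torch.randn(1, 4)
for stage in stages:
    x = stage(x.to(device))

hook.offload()  # offload the last stage manually, as the pipelines above do
```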
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + _load_connected_pipes = True + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Img2ImgPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
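`guidance_scale` and `prior_guidance_scale` above apply the same classifier-free guidance rule to the decoder U-Net and the prior transformer respectively. A sketch of the formula as the docstrings define it (the pipelines compute this internally):

```py
import torch

def classifier_free_guidance(
    noise_pred_uncond: torch.Tensor, noise_pred_text: torch.Tensor, w: float
) -> torch.Tensor:
    # `w` is the guidance scale; values > 1 enable guidance and push the
    # prediction toward the text-conditioned direction.
    return noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond)
```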
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + outputs = self.decoder_pipe( + image=image, + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + width=width, + height=height, + strength=strength, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + return outputs class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): - pass + """ + Combined Pipeline for inpainting generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + _load_connected_pipes = True + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22InpaintPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
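Because `register_modules` above prefixes the prior components with `prior_`, the combined pipeline can also be wired by hand from two separately loaded checkpoints. A sketch, with repo ids that are assumptions rather than part of this patch:

```py
from diffusers import (
    KandinskyV22InpaintCombinedPipeline,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
)

prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
decoder = KandinskyV22InpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint"
)

pipe = KandinskyV22InpaintCombinedPipeline(
    unet=decoder.unet,
    scheduler=decoder.scheduler,
    movq=decoder.movq,
    prior_prior=prior.prior,
    prior_image_encoder=prior.image_encoder,
    prior_text_encoder=prior.text_encoder,
    prior_tokenizer=prior.tokenizer,
    prior_scheduler=prior.scheduler,
    prior_image_processor=prior.image_processor,
)
```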
+ """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while + white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + outputs = self.decoder_pipe( + image=image, + mask_image=mask_image, + image_embeds=prior_outputs[0], + negative_image_embeds=prior_outputs[1], + width=width, + height=height, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + ) + return outputs diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index fa9e17dcb4b0..3cf33b563145 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -7,11 +7,11 @@ from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler from ...utils import ( + is_accelerate_available, + is_accelerate_version, logging, randn_tensor, replace_example_docstring, - is_accelerate_available, - is_accelerate_version, ) from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index afd3003668f2..2b8792e09cf0 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -7,12 +7,11 @@ from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler from ...utils import ( + is_accelerate_available, + is_accelerate_version, logging, randn_tensor, replace_example_docstring, - is_accelerate_available, - is_accelerate_version, - ) from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline From 3196d1a1c6bba848607ce9e32200eedb354dec89 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 16:34:02 +0000 Subject: [PATCH 09/24] finish --- .../dummy_torch_and_transformers_objects.py | 90 
+++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 016760337c69..254b99e85c05 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -167,6 +167,36 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class KandinskyCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class KandinskyImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -182,6 +212,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class KandinskyInpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class KandinskyInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -227,6 +272,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class KandinskyV22CombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -257,6 +317,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class KandinskyV22Img2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -272,6 +347,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class KandinskyV22InpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From 931e76d5a2d579b5723fb742b99bbb7143313aff Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 17:43:10 +0000 Subject: [PATCH 10/24] Fix --- src/diffusers/pipelines/pipeline_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 1b01c287b91f..f084c16c1456 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1418,12 +1418,12 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] snapshot_folder = Path(config_file).parent - all((snapshot_folder / f).is_file() for f in expected_files) + pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) - # if pipeline_is_cached and not force_download: - # if the pipeline is cached, we can directly return it - # else call snapshot_download - # return snapshot_folder + if pipeline_is_cached and not force_download: + # if the pipeline is cached, we can directly return it + # else call snapshot_download + return snapshot_folder user_agent = {"pipeline_class": cls.__name__} if custom_pipeline is not None and not custom_pipeline.endswith(".py"): From ca077c5d852a701d03f8d29fc8819811e487a79d Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 20:39:53 +0000 Subject: [PATCH 11/24] fix more --- src/diffusers/pipelines/auto_pipeline.py | 19 ++++++++++-- src/diffusers/pipelines/pipeline_utils.py | 38 ++++++++++++++++++++--- src/diffusers/utils/torch_utils.py | 4 +++ 3 files changed, 55 insertions(+), 6 deletions(-) diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 180da721d1c2..9ecbf2eb54ff 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -115,7 +115,20 @@ ] -def _get_task_class(mapping, pipeline_class_name): +def _get_connected_pipeline(pipeline_cls): + if pipeline_cls in AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in AUTO_INPAINT_PIPELINES_MAPPING.values(): + return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) + + +def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True): def get_model(pipeline_class_name): for task_mapping in SUPPORTED_TASKS_MAPPINGS: for model_name, pipeline in task_mapping.items(): @@ -128,7 +141,9 @@ def get_model(pipeline_class_name): task_class = mapping.get(model_name, None) if task_class is not None: return task_class - raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") + + if 
throw_error_if_not_exist: + raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") def _get_signature_keys(obj): diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index f084c16c1456..367c34fc38d7 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -323,7 +323,9 @@ def get_class_obj_and_candidates(library_name, class_name, importable_classes, p return class_obj, class_candidates -def _get_pipeline_class(class_obj, config, custom_pipeline=None, cache_dir=None, revision=None): +def _get_pipeline_class( + class_obj, config, load_connected_pipeline=False, custom_pipeline=None, cache_dir=None, revision=None +): if custom_pipeline is not None: if custom_pipeline.endswith(".py"): path = Path(custom_pipeline) @@ -341,7 +343,22 @@ def _get_pipeline_class(class_obj, config, custom_pipeline=None, cache_dir=None, return class_obj diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0]) - return getattr(diffusers_module, config["_class_name"]) + pipeline_cls = getattr(diffusers_module, config["_class_name"]) + + if load_connected_pipeline: + from .auto_pipeline import _get_connected_pipeline + + connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) + if connected_pipeline_cls is not None: + logger.info( + f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`" + ) + else: + logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.") + + pipeline_cls = connected_pipeline_cls or pipeline_cls + + return pipeline_cls def load_sub_model( @@ -877,6 +894,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained @@ -895,6 +913,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P custom_pipeline=custom_pipeline, custom_revision=custom_revision, variant=variant, + load_connected_pipeline=load_connected_pipeline, **kwargs, ) else: @@ -922,7 +941,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P # 3. 
Load the pipeline class, if using custom module then load it from the hub # if we load from explicit class, let's use it pipeline_class = _get_pipeline_class( - cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision + cls, + config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + cache_dir=cache_dir, + revision=custom_revision, ) # DEPRECATED: To be removed in 1.0.0 @@ -1269,6 +1293,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: custom_revision = kwargs.pop("custom_revision", None) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) if use_safetensors and not is_safetensors_available(): raise ValueError( @@ -1359,7 +1384,12 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: # retrieve passed components that should not be downloaded pipeline_class = _get_pipeline_class( - cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision + cls, + config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + cache_dir=cache_dir, + revision=custom_revision, ) expected_components, _ = cls._get_signature_keys(pipeline_class) passed_components = [k for k in expected_components if k in kwargs] diff --git a/src/diffusers/utils/torch_utils.py b/src/diffusers/utils/torch_utils.py index 5f64bce25e78..99ea4d8cf1d0 100644 --- a/src/diffusers/utils/torch_utils.py +++ b/src/diffusers/utils/torch_utils.py @@ -64,6 +64,10 @@ def randn_tensor( elif gen_device_type != device.type and gen_device_type == "cuda": raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") + # make sure generator list of length 1 is treated like a non-list + if isinstance(generator, list) and len(generator) == 1: + generator = generator[0] + if isinstance(generator, list): shape = (1,) + shape[1:] latents = [ From 9ced89b7984bf341a8b445b8f3b19e7928521821 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 20:57:56 +0000 Subject: [PATCH 12/24] make style --- src/diffusers/pipelines/auto_pipeline.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 9ecbf2eb54ff..df7cf9f189fc 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -116,6 +116,7 @@ def _get_connected_pipeline(pipeline_cls): + # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder if pipeline_cls in AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False @@ -124,7 +125,7 @@ def _get_connected_pipeline(pipeline_cls): return _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False ) - if pipeline_cls in AUTO_INPAINT_PIPELINES_MAPPING.values(): + if pipeline_cls in AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) @@ -387,7 +388,7 @@ def from_pipe(cls, pipeline, **kwargs): if len(missing_modules) > 0: raise ValueError( - f"Pipeline {text_2_image_cls} expected {expected_modules}, but only 
{set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed" + f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = text_2_image_cls(**text_2_image_kwargs) @@ -632,7 +633,7 @@ def from_pipe(cls, pipeline, **kwargs): if len(missing_modules) > 0: raise ValueError( - f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed" + f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = image_2_image_cls(**image_2_image_kwargs) @@ -875,7 +876,7 @@ def from_pipe(cls, pipeline, **kwargs): if len(missing_modules) > 0: raise ValueError( - f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed" + f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = inpainting_cls(**inpainting_kwargs) From a9630bfbc9b65a925c194818482a36c237c1a475 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 21:36:41 +0000 Subject: [PATCH 13/24] fix kandinsky mask --- docs/source/en/api/pipelines/kandinsky.mdx | 4 ++-- .../pipelines/kandinsky/pipeline_kandinsky_combined.py | 4 ++-- .../pipelines/kandinsky/pipeline_kandinsky_inpaint.py | 6 ++++-- .../kandinsky2_2/pipeline_kandinsky2_2_combined.py | 4 ++-- .../kandinsky2_2/pipeline_kandinsky2_2_inpainting.py | 10 ++++++---- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/docs/source/en/api/pipelines/kandinsky.mdx b/docs/source/en/api/pipelines/kandinsky.mdx index 6b6c64a08951..948b32cab2c2 100644 --- a/docs/source/en/api/pipelines/kandinsky.mdx +++ b/docs/source/en/api/pipelines/kandinsky.mdx @@ -212,9 +212,9 @@ init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) -mask = np.ones((768, 768), dtype=np.float32) +mask = np.zeros((768, 768), dtype=np.float32) # Let's mask out an area above the cat's head -mask[:250, 250:-250] = 0 +mask[:250, 250:-250] = 1 out = pipe( prompt, diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index c276e7b11b99..cca90d02e1f8 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -512,8 +512,8 @@ def __call__( process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded again. mask_image (`np.array`): - Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while - white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. 
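Taken together with `_load_connected_pipes = True` on the combined classes, the `load_connected_pipeline` plumbing above lets a plain decoder checkpoint resolve to its combined pipeline. A hedged sketch of the intended call (the repo id and the resulting class are expectations, not asserted by this diff):

```py
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", load_connected_pipeline=True
)
print(type(pipe).__name__)  # expected: KandinskyV22CombinedPipeline
```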
negative_prompt (`str` or `List[str]`, *optional*): diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 2b525d4ecba0..87eb412b4aa6 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -65,8 +65,8 @@ ... "/kandinsky/cat.png" ... ) - >>> mask = np.ones((768, 768), dtype=np.float32) - >>> mask[:250, 250:-250] = 0 + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 >>> out = pipe( ... prompt, @@ -232,6 +232,8 @@ def prepare_mask_and_masked_image(image, mask, height, width): mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) + mask = 1 - mask + return mask, image diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index c4d9ea5f2020..d368a1db2074 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -477,8 +477,8 @@ def __call__( process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded again. mask_image (`np.array`): - Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while - white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. negative_prompt (`str` or `List[str]`, *optional*): diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 76f1bdbbc5ea..0c87433bf281 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -61,8 +61,8 @@ ... "/kandinsky/cat.png" ... ) - >>> mask = np.ones((768, 768), dtype=np.float32) - >>> mask[:250, 250:-250] = 0 + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 >>> out = pipe( ... image=init_image, @@ -230,6 +230,8 @@ def prepare_mask_and_masked_image(image, mask, height, width): mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) + mask = 1 - mask + return mask, image @@ -329,8 +331,8 @@ def __call__( `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. mask_image (`np.array`): - Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while - white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. 
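The convention flip in this commit is easy to restate: after the added `mask = 1 - mask`, user-facing masks follow the common "white repaints, black preserves" rule, while the tensor the denoiser actually sees keeps its old orientation. A small sketch of building a mask under the new convention:

```py
import numpy as np

# New convention: 1 (white) = repaint, 0 (black) = preserve.
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1  # repaint the region above the cat's head

# internally the pipelines invert once, so the model still receives
# the pre-flip orientation: keep_mask = 1 - mask
```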
negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): From 1a5023b09639fb19ba4a418b44225d44b81e9406 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 21:58:47 +0000 Subject: [PATCH 14/24] fix inpainting test --- tests/pipelines/kandinsky/test_kandinsky_inpaint.py | 8 ++++---- tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py index b8777bbaf506..d2e2ed24efef 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ -189,8 +189,8 @@ def get_dummy_inputs(self, device, seed=0): image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create mask - mask = np.ones((64, 64), dtype=np.float32) - mask[:32, :32] = 0 + mask = np.zeros((64, 64), dtype=np.float32) + mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) @@ -296,8 +296,8 @@ def test_kandinsky_inpaint(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) - mask = np.ones((768, 768), dtype=np.float32) - mask[:250, 250:-250] = 0 + mask = np.zeros((768, 768), dtype=np.float32) + mask[:250, 250:-250] = 1 prompt = "a hat" diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py index 9be3993acc6f..7b935eb84510 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py @@ -165,8 +165,8 @@ def get_dummy_inputs(self, device, seed=0): image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create mask - mask = np.ones((64, 64), dtype=np.float32) - mask[:32, :32] = 0 + mask = np.zeros((64, 64), dtype=np.float32) + mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) @@ -244,8 +244,8 @@ def test_kandinsky_inpaint(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) - mask = np.ones((768, 768), dtype=np.float32) - mask[:250, 250:-250] = 0 + mask = np.zeros((768, 768), dtype=np.float32) + mask[:250, 250:-250] = 1 prompt = "a hat" From 09372a47340e7f4fc8cb1a3d4c6854cf198bc97b Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 25 Jul 2023 23:42:42 +0000 Subject: [PATCH 15/24] add callbacks --- .../pipelines/kandinsky/pipeline_kandinsky.py | 14 +++++++- .../kandinsky/pipeline_kandinsky_combined.py | 32 ++++++++++++++++++- .../kandinsky/pipeline_kandinsky_img2img.py | 13 +++++++- .../kandinsky/pipeline_kandinsky_inpaint.py | 13 +++++++- .../kandinsky2_2/pipeline_kandinsky2_2.py | 13 +++++++- .../pipeline_kandinsky2_2_combined.py | 32 ++++++++++++++++++- .../pipeline_kandinsky2_2_controlnet.py | 13 +++++++- ...ipeline_kandinsky2_2_controlnet_img2img.py | 13 +++++++- .../pipeline_kandinsky2_2_img2img.py | 13 +++++++- .../pipeline_kandinsky2_2_inpainting.py | 14 +++++++- 10 files changed, 160 insertions(+), 10 deletions(-) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index 8e42119191b8..89afa0060ef8 100644 --- 
a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import torch from transformers import ( @@ -269,6 +269,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -309,6 +311,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -397,6 +405,10 @@ def __call__( latents, generator=generator, ).prev_sample + + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index cca90d02e1f8..f810503b55c7 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import PIL import torch @@ -137,6 +137,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -182,6 +184,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
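The `callback`/`callback_steps` pair added here mirrors the convention used by the Stable Diffusion pipelines. A small sketch of a progress callback (the pipeline object is assumed to already be instantiated):

```py
import torch

def log_progress(step: int, timestep: int, latents: torch.FloatTensor):
    print(f"step {step:03d} | t={timestep} | latents {tuple(latents.shape)}")

# invoked every 5 denoising steps:
# image = pipe(prompt, callback=log_progress, callback_steps=5).images[0]
```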
@@ -212,6 +220,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs @@ -312,6 +322,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -367,6 +379,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -399,6 +417,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs @@ -499,6 +519,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -553,6 +575,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -585,6 +613,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py index 14740f7e6afe..5673d306aa0c 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import PIL @@ -332,6 +332,8 @@ def __call__( num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -377,6 +379,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -491,6 +499,9 @@ def __call__( generator=generator, ).prev_sample + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # 7. post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 87eb412b4aa6..f12e936243d0 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -13,7 +13,7 @@ # limitations under the License. from copy import deepcopy -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import PIL @@ -433,6 +433,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -484,6 +486,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
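The `callback`/`callback_steps` arguments threaded through each pipeline above are easiest to see from the caller's side. A minimal sketch of hooking into the denoising loop; the checkpoint id follows the repos used elsewhere in this series, and `image_emb`/`zero_image_emb` are assumed to be outputs of `KandinskyV22PriorPipeline`:

```py
import torch
from diffusers import KandinskyV22Pipeline

def log_progress(step: int, timestep: int, latents: torch.FloatTensor):
    # Receives the loop index, the scheduler timestep, and the current latents.
    print(f"step {step} (timestep {timestep}): latents {tuple(latents.shape)}")

pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
pipe.to("cuda")

image = pipe(
    image_embeds=image_emb,  # assumed: embeddings produced by the prior pipeline
    negative_image_embeds=zero_image_emb,
    callback=log_progress,
    callback_steps=5,  # fire at steps 0, 5, 10, ...
).images[0]
```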
@@ -611,6 +619,9 @@ def __call__( generator=generator, ).prev_sample + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index d36471fd08aa..5e5b83bfa995 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import torch @@ -148,6 +148,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -182,6 +184,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -258,6 +266,9 @@ def __call__( latents, generator=generator, )[0] + + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index d368a1db2074..b69895bce66d 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import PIL import torch @@ -124,6 +124,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -169,6 +171,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -198,6 +206,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs @@ -288,6 +298,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -343,6 +355,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -374,6 +392,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs @@ -464,6 +484,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -518,6 +540,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
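Because every loop guards the hook with `i % callback_steps == 0`, the callback fires on the very first step and then on every `callback_steps`-th step thereafter; the docstrings' "frequency" wording glosses over the step-0 call. The arithmetic, checked in isolation:

```py
num_inference_steps, callback_steps = 10, 4
fired = [i for i in range(num_inference_steps) if i % callback_steps == 0]
assert fired == [0, 4, 8]  # includes step 0, then every 4th step
```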
@@ -549,6 +577,8 @@ def __call__( generator=generator, guidance_scale=guidance_scale, output_type=output_type, + callback=callback, + callback_steps=callback_steps, return_dict=return_dict, ) return outputs diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index feaa51309158..22b3eaf0915e 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import torch @@ -190,6 +190,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -232,6 +234,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -313,6 +321,9 @@ def __call__( latents, generator=generator, )[0] + + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py index 4690cf49af14..53918fede7c2 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import PIL @@ -246,6 +246,8 @@ def __call__( num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -289,6 +291,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -374,6 +382,9 @@ def __call__( generator=generator, )[0] + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py index 1993e44c4c64..dba50312e8d7 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import PIL @@ -218,6 +218,8 @@ def __call__( num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -259,6 +261,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -338,6 +346,9 @@ def __call__( generator=generator, )[0] + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 0c87433bf281..f02a78f9b044 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -13,7 +13,7 @@ # limitations under the License. from copy import deepcopy -from typing import List, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import PIL @@ -320,6 +320,8 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, return_dict: bool = True, ): """ @@ -362,6 +364,12 @@ def __call__( output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). 
+ callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. @@ -472,6 +480,10 @@ def __call__( ) latents = init_mask * init_latents_proper + (1 - init_mask) * latents + + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + # post-processing latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents image = self.movq.decode(latents, force_not_quantize=True)["sample"] From 8f4d316212c717218a86023e70cb13032a0b3b52 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 12:42:15 +0200 Subject: [PATCH 16/24] add tests --- .../kandinsky/pipeline_kandinsky_combined.py | 90 ++++- .../pipeline_kandinsky2_2_combined.py | 90 ++++- src/diffusers/pipelines/pipeline_utils.py | 2 +- tests/pipelines/kandinsky/test_kandinsky.py | 60 ++-- .../kandinsky/test_kandinsky_combined.py | 332 +++++++++++++++++ .../kandinsky/test_kandinsky_img2img.py | 64 ++-- .../kandinsky/test_kandinsky_inpaint.py | 66 ++-- .../kandinsky/test_kandinsky_prior.py | 42 ++- .../pipelines/kandinsky_v22/test_kandinsky.py | 56 +-- .../kandinsky_v22/test_kandinsky_combined.py | 336 ++++++++++++++++++ .../kandinsky_v22/test_kandinsky_img2img.py | 58 +-- .../kandinsky_v22/test_kandinsky_inpaint.py | 60 ++-- .../kandinsky_v22/test_kandinsky_prior.py | 42 ++- 13 files changed, 1095 insertions(+), 203 deletions(-) create mode 100644 tests/pipelines/kandinsky/test_kandinsky_combined.py create mode 100644 tests/pipelines/kandinsky_v22/test_kandinsky_combined.py diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index f810503b55c7..7caf29a01243 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -121,6 +121,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -209,14 +218,21 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + outputs = self.decoder_pipe( prompt=prompt, - image_embeds=prior_outputs[0], - negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, width=width, height=height, 
num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, @@ -304,6 +320,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -404,16 +429,31 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + outputs = self.decoder_pipe( prompt=prompt, image=image, - image_embeds=prior_outputs[0], - negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, strength=strength, width=width, height=height, num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, @@ -501,6 +541,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -600,16 +649,39 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + outputs = self.decoder_pipe( prompt=prompt, image=image, mask_image=mask_image, - image_embeds=prior_outputs[0], - 
negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index b69895bce66d..e06246620be9 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -108,6 +108,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -196,13 +205,20 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + outputs = self.decoder_pipe( - image_embeds=prior_outputs[0], - negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, @@ -280,6 +296,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -380,15 +405,30 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + outputs = self.decoder_pipe( image=image, - image_embeds=prior_outputs[0], - negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, width=width, height=height, strength=strength, 
num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, @@ -466,6 +506,15 @@ def enable_model_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( @@ -565,15 +614,38 @@ def __call__( output_type="pt", return_dict=False, ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + outputs = self.decoder_pipe( image=image, mask_image=mask_image, - image_embeds=prior_outputs[0], - negative_image_embeds=prior_outputs[1], + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images_per_prompt, generator=generator, guidance_scale=guidance_scale, output_type=output_type, diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 367c34fc38d7..d95b3f6c2636 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1087,7 +1087,7 @@ def load_module(name, value): init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) 
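The replication logic that now appears in every combined `__call__` keeps the decoder inputs aligned with the prior's output batch: the prior has already expanded the batch by `num_images_per_prompt`, which is why that argument is no longer forwarded to the decoder. (Note that the `image = [image] if isinstance(prompt, PIL.Image.Image) else image` guard repeated above tests `prompt` where it presumably means `image`.) The tiling itself, with illustrative values:

```py
image_embeds_batch = 4  # e.g. 2 prompts * num_images_per_prompt=2 from the prior
prompt = ["a red cat", "a blue dog"]

# Same guard as in the pipelines: only tile when the batch is a clean multiple.
if len(prompt) < image_embeds_batch and image_embeds_batch % len(prompt) == 0:
    prompt = (image_embeds_batch // len(prompt)) * prompt

assert prompt == ["a red cat", "a blue dog", "a red cat", "a blue dog"]
```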
- if pipeline_class._load_connected_pipes: + if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")): modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} load_kwargs = { diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py index 9c0b7c732933..c5b68b6cb35c 100644 --- a/tests/pipelines/kandinsky/test_kandinsky.py +++ b/tests/pipelines/kandinsky/test_kandinsky.py @@ -32,30 +32,7 @@ enable_full_determinism() -class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyPipeline - params = [ - "prompt", - "image_embeds", - "negative_image_embeds", - ] - batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"] - required_optional_params = [ - "generator", - "height", - "width", - "latents", - "guidance_scale", - "negative_prompt", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -74,7 +51,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_tokenizer(self): @@ -196,6 +173,39 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyPipeline + params = [ + "prompt", + "image_embeds", + "negative_image_embeds", + ] + batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Dummies() + return dummy.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummy = Dummies() + return dummy.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky(self): device = "cpu" diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py new file mode 100644 index 000000000000..96c715bbc1e6 --- /dev/null +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -0,0 +1,332 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
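One behavioral detail of the `pipeline_utils.py` hunk above deserves a sketch: connected pipelines are now resolved only when the cached repo actually ships a model card, so repos without a README.md no longer hit `ModelCard.load` and fail. Simplified, with names as in the surrounding function:

```py
import os

# Skip connected-pipe resolution entirely when no model card is present.
if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")):
    # ... load the connected prior/decoder repos listed in the model card ...
    pass
```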
+ +import unittest + +import numpy as np + +from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline +from diffusers.utils import torch_device +from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu + +from ..test_pipelines_common import PipelineTesterMixin +from .test_kandinsky import Dummies +from .test_kandinsky_img2img import Dummies as Img2ImgDummies +from .test_kandinsky_inpaint import Dummies as InpaintDummies +from .test_kandinsky_prior import Dummies as PriorDummies + + +enable_full_determinism() + + +class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyCombinedPipeline + params = [ + "prompt", + ] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Dummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update( + { + "height": 64, + "width": 64, + } + ) + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + +class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyImg2ImgCombinedPipeline + params = ["prompt", "image"] + batch_params = ["prompt", 
"negative_prompt", "image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Img2ImgDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = Img2ImgDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + +class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyInpaintCombinedPipeline + params = ["prompt", "image", "mask_image"] + batch_params = ["prompt", "negative_prompt", "image", "mask_image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = InpaintDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = 
InpaintDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py index 6b558d4a06b7..47201322f708 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py @@ -40,32 +40,7 @@ enable_full_determinism() -class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyImg2ImgPipeline - params = ["prompt", "image_embeds", "negative_image_embeds", "image"] - batch_params = [ - "prompt", - "negative_prompt", - "image_embeds", - "negative_image_embeds", - "image", - ] - required_optional_params = [ - "generator", - "height", - "width", - "strength", - "guidance_scale", - "negative_prompt", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -84,7 +59,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_tokenizer(self): @@ -216,6 +191,41 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyImg2ImgPipeline + params = ["prompt", "image_embeds", "negative_image_embeds", "image"] + batch_params = [ + "prompt", + "negative_prompt", + "image_embeds", + "negative_image_embeds", + "image", + ] + 
required_optional_params = [ + "generator", + "height", + "width", + "strength", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_img2img(self): device = "cpu" diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py index d2e2ed24efef..3e84e2050067 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ -33,33 +33,7 @@ enable_full_determinism() -class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyInpaintPipeline - params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] - batch_params = [ - "prompt", - "negative_prompt", - "image_embeds", - "negative_image_embeds", - "image", - "mask_image", - ] - required_optional_params = [ - "generator", - "height", - "width", - "latents", - "guidance_scale", - "negative_prompt", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -78,7 +52,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_tokenizer(self): @@ -211,6 +185,42 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyInpaintPipeline + params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] + batch_params = [ + "prompt", + "negative_prompt", + "image_embeds", + "negative_image_embeds", + "image", + "mask_image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_inpaint(self): device = "cpu" diff --git a/tests/pipelines/kandinsky/test_kandinsky_prior.py b/tests/pipelines/kandinsky/test_kandinsky_prior.py index d9c260eabc06..7b1acc9fc03e 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_prior.py +++ b/tests/pipelines/kandinsky/test_kandinsky_prior.py @@ -37,22 +37,7 @@ enable_full_determinism() -class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyPriorPipeline - params = ["prompt"] - batch_params = ["prompt", "negative_prompt"] - required_optional_params = [ - "num_images_per_prompt", - "generator", - "num_inference_steps", - "latents", - "negative_prompt", - "guidance_scale", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -183,6 +168,31 @@ def 
get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyPriorPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "generator", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Dummies() + return dummy.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummy = Dummies() + return dummy.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_prior(self): device = "cpu" diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky.py b/tests/pipelines/kandinsky_v22/test_kandinsky.py index 162c96d4b3e2..f2ca10854e27 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky.py @@ -30,28 +30,7 @@ enable_full_determinism() -class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyV22Pipeline - params = [ - "image_embeds", - "negative_image_embeds", - ] - batch_params = ["image_embeds", "negative_image_embeds"] - required_optional_params = [ - "generator", - "height", - "width", - "latents", - "guidance_scale", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -70,7 +49,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_unet(self): @@ -166,6 +145,37 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Pipeline + params = [ + "image_embeds", + "negative_image_embeds", + ] + batch_params = ["image_embeds", "negative_image_embeds"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + def test_kandinsky(self): device = "cpu" diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py new file mode 100644 index 000000000000..5625c60ef84c --- /dev/null +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py @@ -0,0 +1,336 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
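As in the v1 tests above, the new v22 combined tests are built by composition: each reuses the decoder test's `Dummies` plus the prior test's `Dummies`, namespacing the prior's components with a `prior_` prefix so they line up with the combined pipelines' constructor arguments. The merging pattern in isolation (component values are illustrative strings):

```py
components = {"unet": "dummy-unet", "scheduler": "ddim", "movq": "dummy-movq"}
prior_components = {"prior": "dummy-prior", "tokenizer": "clip-tokenizer", "scheduler": "unclip"}

# Prefixing avoids key collisions (both factories provide a "scheduler").
components.update({f"prior_{k}": v for k, v in prior_components.items()})
assert components["scheduler"] == "ddim" and components["prior_scheduler"] == "unclip"
```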
+ +import unittest + +import numpy as np + +from diffusers import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, +) +from diffusers.utils import torch_device +from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu + +from ..test_pipelines_common import PipelineTesterMixin +from .test_kandinsky import Dummies +from .test_kandinsky_img2img import Dummies as Img2ImgDummies +from .test_kandinsky_inpaint import Dummies as InpaintDummies +from .test_kandinsky_prior import Dummies as PriorDummies + + +enable_full_determinism() + + +class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22CombinedPipeline + params = [ + "prompt", + ] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Dummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update( + { + "height": 64, + "width": 64, + } + ) + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + +class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Img2ImgCombinedPipeline + params = ["prompt", "image"] + 
batch_params = ["prompt", "negative_prompt", "image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = Img2ImgDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = Img2ImgDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + +class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22InpaintCombinedPipeline + params = ["prompt", "image", "mask_image"] + batch_params = ["prompt", "negative_prompt", "image", "mask_image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummy = InpaintDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + 
prior_dummy = PriorDummies() + dummy = InpaintDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] + ) + + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + + @require_torch_gpu + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py index 069854325fd4..84b92d9f405d 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py @@ -37,29 +37,7 @@ enable_full_determinism() -class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyV22Img2ImgPipeline - params = ["image_embeds", "negative_image_embeds", "image"] - batch_params = [ - "image_embeds", - "negative_image_embeds", - "image", - ] - required_optional_params = [ - "generator", - "height", - "width", - "strength", - "guidance_scale", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -78,7 +56,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_unet(self): @@ -184,6 +162,38 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Img2ImgPipeline + params = ["image_embeds", "negative_image_embeds", "image"] + batch_params = [ + "image_embeds", + "negative_image_embeds", + "image", + ] + required_optional_params = [ + "generator", + "height", + 
"width", + "strength", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_img2img(self): device = "cpu" diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py index 7b935eb84510..e08e2c08f2d9 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py @@ -37,30 +37,7 @@ enable_full_determinism() -class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyV22InpaintPipeline - params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] - batch_params = [ - "image_embeds", - "negative_image_embeds", - "image", - "mask_image", - ] - required_optional_params = [ - "generator", - "height", - "width", - "latents", - "guidance_scale", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -79,7 +56,7 @@ def time_embed_dim(self): @property def cross_attention_dim(self): - return 100 + return 32 @property def dummy_unet(self): @@ -186,6 +163,39 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22InpaintPipeline + params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] + batch_params = [ + "image_embeds", + "negative_image_embeds", + "image", + "mask_image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_inpaint(self): device = "cpu" diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py b/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py index 1b8cefa91f4e..3191f6a11309 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py @@ -37,22 +37,7 @@ enable_full_determinism() -class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyV22PriorPipeline - params = ["prompt"] - batch_params = ["prompt", "negative_prompt"] - required_optional_params = [ - "num_images_per_prompt", - "generator", - "num_inference_steps", - "latents", - "negative_prompt", - "guidance_scale", - "output_type", - "return_dict", - ] - test_xformers_attention = False - +class Dummies: @property def text_embedder_hidden_size(self): return 32 @@ -183,6 +168,31 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + +class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = 
KandinskyV22PriorPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "generator", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + def test_kandinsky_prior(self): device = "cpu" From d70d0e1164370f227df8abc05b4304ff1f1a7812 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 13:03:07 +0200 Subject: [PATCH 17/24] fix tests --- test_corrections.txt | 12 ++++++++++++ tests/pipelines/kandinsky/test_kandinsky.py | 4 +--- tests/pipelines/kandinsky/test_kandinsky_combined.py | 12 +++--------- tests/pipelines/kandinsky/test_kandinsky_img2img.py | 4 +--- tests/pipelines/kandinsky/test_kandinsky_inpaint.py | 6 +----- tests/pipelines/kandinsky_v22/test_kandinsky.py | 4 +--- .../kandinsky_v22/test_kandinsky_combined.py | 12 +++--------- .../kandinsky_v22/test_kandinsky_img2img.py | 4 +--- 8 files changed, 23 insertions(+), 35 deletions(-) create mode 100644 test_corrections.txt diff --git a/test_corrections.txt b/test_corrections.txt new file mode 100644 index 000000000000..3fda47895c33 --- /dev/null +++ b/test_corrections.txt @@ -0,0 +1,12 @@ +tests/pipelines/kandinsky_v22/test_kandinsky.py;KandinskyV22PipelineFastTests;test_kandinsky;expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineCombinedFastTests;test_kandinsky;expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineImg2ImgCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineInpaintCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4946, 0.4839, 0.4792, 0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) +tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py;KandinskyV22Img2ImgPipelineFastTests;test_kandinsky_img2img;expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) +tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py;KandinskyV22InpaintPipelineFastTests;test_kandinsky_inpaint;expected_slice = np.array([0.5008, 0.4897, 0.4887, 0.4937, 0.4808, 0.4936, 0.4739, 0.4703, 0.4828]) +tests/pipelines/kandinsky_v22/test_kandinsky.py;KandinskyV22PipelineFastTests;test_kandinsky;expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineCombinedFastTests;test_kandinsky;expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineImg2ImgCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) +tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineInpaintCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4946, 0.4839, 0.4792, 
0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) +tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py;KandinskyV22Img2ImgPipelineFastTests;test_kandinsky_img2img;expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) +tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py;KandinskyV22InpaintPipelineFastTests;test_kandinsky_inpaint;expected_slice = np.array([0.5008, 0.4897, 0.4887, 0.4937, 0.4808, 0.4936, 0.4739, 0.4703, 0.4828]) diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py index c5b68b6cb35c..01b8a0f3eec1 100644 --- a/tests/pipelines/kandinsky/test_kandinsky.py +++ b/tests/pipelines/kandinsky/test_kandinsky.py @@ -229,9 +229,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index 96c715bbc1e6..c42a7f351319 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -95,9 +95,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.1579, 0.2966, 0.3703, 0.1633, 0.2813, 0.3113, 0.6867, 0.6880, 0.4141]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -194,9 +192,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.4451, 0.4793, 0.4002, 0.4480, 0.4170, 0.4208, 0.5097, 0.5002, 0.4693]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -293,9 +289,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.0764, 0.1887, 0.0870, 0.0000, 0.0000, 0.0765, 0.3652, 0.4669, 0.4194]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py index 47201322f708..d6588bd45dd7 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py @@ -249,9 +249,7 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] - ) + expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py index 3e84e2050067..7f1841d60807 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ 
-242,13 +242,9 @@ def test_kandinsky_inpaint(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print(f"image.shape {image.shape}") - assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] - ) + expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky.py b/tests/pipelines/kandinsky_v22/test_kandinsky.py index f2ca10854e27..6430a476ab98 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky.py @@ -199,9 +199,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] - ) + expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py index 5625c60ef84c..b3bf0ceba9f4 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py @@ -99,9 +99,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -198,9 +196,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -297,9 +293,7 @@ def test_kandinsky(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953] - ) + expected_slice = np.array([0.4946, 0.4839, 0.4792, 0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py index 84b92d9f405d..17f27d0d7804 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py @@ -217,9 +217,7 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] - ) + expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" From a649f60498f5e92f178edb7a110c8e1be79a3380 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 13:13:46 +0200 Subject: [PATCH 18/24] Apply suggestions 
from code review Co-authored-by: YiYi Xu --- .../kandinsky/pipeline_kandinsky_combined.py | 12 ++++++------ .../kandinsky2_2/pipeline_kandinsky2_2_combined.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 7caf29a01243..8edd7c3411be 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -136,13 +136,13 @@ def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", @@ -336,14 +336,14 @@ def __call__( prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, strength: float = 0.3, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", @@ -558,13 +558,13 @@ def __call__( image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index e06246620be9..75bd711dd946 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -123,13 +123,13 @@ def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", @@ -312,14 +312,14 @@ def __call__( prompt: Union[str, List[str]], image: Union[torch.FloatTensor, 
PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", @@ -523,13 +523,13 @@ def __call__( image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, - num_inference_steps: int = 25, + num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, - prior_num_inference_steps: int = 100, + prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", From 435657d0c2c0cdd1798e9e4e377499e72502e060 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 13:35:05 +0200 Subject: [PATCH 19/24] docs --- docs/source/en/_toctree.yml | 2 + docs/source/en/api/pipelines/kandinsky.mdx | 274 ++------------ .../source/en/api/pipelines/kandinsky_v22.mdx | 342 ++++++++++++++++++ src/diffusers/pipelines/auto_pipeline.py | 18 +- .../pipelines/kandinsky2_2/__init__.py | 10 +- 5 files changed, 383 insertions(+), 263 deletions(-) create mode 100644 docs/source/en/api/pipelines/kandinsky_v22.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 53718f8d0e7a..2a9de8b3f24b 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -206,6 +206,8 @@ title: InstructPix2Pix - local: api/pipelines/kandinsky title: Kandinsky + - local: api/pipelines/kandinsky_v22 + title: Kandinsky 2.2 - local: api/pipelines/latent_diffusion title: Latent Diffusion - local: api/pipelines/panorama diff --git a/docs/source/en/api/pipelines/kandinsky.mdx b/docs/source/en/api/pipelines/kandinsky.mdx index 948b32cab2c2..79c602b8bc07 100644 --- a/docs/source/en/api/pipelines/kandinsky.mdx +++ b/docs/source/en/api/pipelines/kandinsky.mdx @@ -276,208 +276,6 @@ image.save("starry_cat.png") ``` ![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/starry_cat.png) - -### Text-to-Image Generation with ControlNet Conditioning - -In the following, we give a simple example of how to use [`KandinskyV22ControlnetPipeline`] to add control to the text-to-image generation with a depth image. - -First, let's take an image and extract its depth map. - -```python -from diffusers.utils import load_image - -img = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png" -).resize((768, 768)) -``` -![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png) - -We can use the `depth-estimation` pipeline from transformers to process the image and retrieve its depth map. 
- -```python -import torch -import numpy as np - -from transformers import pipeline -from diffusers.utils import load_image - - -def make_hint(image, depth_estimator): - image = depth_estimator(image)["depth"] - image = np.array(image) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - detected_map = torch.from_numpy(image).float() / 255.0 - hint = detected_map.permute(2, 0, 1) - return hint - - -depth_estimator = pipeline("depth-estimation") -hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") -``` -Now, we load the prior pipeline and the text-to-image controlnet pipeline - -```python -from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline - -pipe_prior = KandinskyV22PriorPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 -) -pipe_prior = pipe_prior.to("cuda") - -pipe = KandinskyV22ControlnetPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 -) -pipe = pipe.to("cuda") -``` - -We pass the prompt and negative prompt through the prior to generate image embeddings - -```python -prompt = "A robot, 4k photo" - -negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" - -generator = torch.Generator(device="cuda").manual_seed(43) -image_emb, zero_image_emb = pipe_prior( - prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator -).to_tuple() -``` - -Now we can pass the image embeddings and the depth image we extracted to the controlnet pipeline. With Kandinsky 2.2, only prior pipelines accept `prompt` input. You do not need to pass the prompt to the controlnet pipeline. - -```python -images = pipe( - image_embeds=image_emb, - negative_image_embeds=zero_image_emb, - hint=hint, - num_inference_steps=50, - generator=generator, - height=768, - width=768, -).images - -images[0].save("robot_cat.png") -``` - -The output image looks as follow: -![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat_text2img.png) - -### Image-to-Image Generation with ControlNet Conditioning - -Kandinsky 2.2 also includes a [`KandinskyV22ControlnetImg2ImgPipeline`] that will allow you to add control to the image generation process with both the image and its depth map. This pipeline works really well with [`KandinskyV22PriorEmb2EmbPipeline`], which generates image embeddings based on both a text prompt and an image. - -For our robot cat example, we will pass the prompt and cat image together to the prior pipeline to generate an image embedding. We will then use that image embedding and the depth map of the cat to further control the image generation process. - -We can use the same cat image and its depth map from the last example. 
- -```python -import torch -import numpy as np - -from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline -from diffusers.utils import load_image -from transformers import pipeline - -img = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/cat.png" -).resize((768, 768)) - - -def make_hint(image, depth_estimator): - image = depth_estimator(image)["depth"] - image = np.array(image) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - detected_map = torch.from_numpy(image).float() / 255.0 - hint = detected_map.permute(2, 0, 1) - return hint - - -depth_estimator = pipeline("depth-estimation") -hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") - -pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 -) -pipe_prior = pipe_prior.to("cuda") - -pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 -) -pipe = pipe.to("cuda") - -prompt = "A robot, 4k photo" -negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" - -generator = torch.Generator(device="cuda").manual_seed(43) - -# run prior pipeline - -img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator) -negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator) - -# run controlnet img2img pipeline -images = pipe( - image=img, - strength=0.5, - image_embeds=img_emb.image_embeds, - negative_image_embeds=negative_emb.image_embeds, - hint=hint, - num_inference_steps=50, - generator=generator, - height=768, - width=768, -).images - -images[0].save("robot_cat.png") -``` - -Here is the output. Compared with the output from our text-to-image controlnet example, it kept a lot more cat facial details from the original image and worked into the robot style we asked for. - -![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat.png) - -## Kandinsky 2.2 - -The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1. First, you will need to use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2, all of the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned with only `image_embeds` and `negative_image_embeds`. - -Let's look at an example of how to perform text-to-image generation using Kandinsky 2.2. - -First, let's create the prior pipeline and text-to-image pipeline with Kandinsky 2.2 checkpoints. 
- -```python -from diffusers import DiffusionPipeline -import torch - -pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16) -pipe_prior.to("cuda") - -t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) -t2i_pipe.to("cuda") -``` - -You can then use `pipe_prior` to generate image embeddings. - -```python -prompt = "portrait of a women, blue eyes, cinematic" -negative_prompt = "low quality, bad quality" - -image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple() -``` - -Now you can pass these embeddings to the text-to-image pipeline. When using Kandinsky 2.2 you don't need to pass the `prompt` (but you do with the previous version, Kandinsky 2.1). - -``` -image = t2i_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[ - 0 -] -image.save("portrait.png") -``` -![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/%20blue%20eyes.png) - -We used the text-to-image pipeline as an example, but the same process applies to all decoding pipelines in Kandinsky 2.2. For more information, please refer to our API section for each pipeline. - - ## Optimization Running Kandinsky in inference requires running both a first prior pipeline: [`KandinskyPriorPipeline`] @@ -530,85 +328,63 @@ t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=T After compilation you should see a very fast inference time. For more information, feel free to have a look at [Our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0). + + +To generate images directly from a single pipeline, you can use [`KandinskyCombinedPipeline`], [`KandinskyImg2ImgCombinedPipeline`], [`KandinskyInpaintCombinedPipeline`]. 
+These combined pipelines wrap the [`KandinskyPriorPipeline`] and [`KandinskyPipeline`], [`KandinskyImg2ImgPipeline`], [`KandinskyInpaintPipeline`] respectively into a single +pipeline for a simpler user experience + + + ## Available Pipelines: | Pipeline | Tasks | |---|---| -| [pipeline_kandinsky2_2.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py) | *Text-to-Image Generation* | | [pipeline_kandinsky.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py) | *Text-to-Image Generation* | -| [pipeline_kandinsky2_2_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py) | *Image-Guided Image Generation* | +| [pipeline_kandinsky_combined.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky_combined.py) | *End-to-end Text-to-Image, image-to-image, Inpainting Generation* | | [pipeline_kandinsky_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py) | *Image-Guided Image Generation* | -| [pipeline_kandinsky2_2_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py) | *Image-Guided Image Generation* | | [pipeline_kandinsky_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py) | *Image-Guided Image Generation* | -| [pipeline_kandinsky2_2_controlnet.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py) | *Image-Guided Image Generation* | -| [pipeline_kandinsky2_2_controlnet_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py) | *Image-Guided Image Generation* | - - -### KandinskyV22Pipeline - -[[autodoc]] KandinskyV22Pipeline - - all - - __call__ - -### KandinskyV22ControlnetPipeline -[[autodoc]] KandinskyV22ControlnetPipeline - - all - - __call__ -### KandinskyV22ControlnetImg2ImgPipeline +### KandinskyPriorPipeline -[[autodoc]] KandinskyV22ControlnetImg2ImgPipeline +[[autodoc]] KandinskyPriorPipeline - all - __call__ + - interpolate + +### KandinskyPipeline -### KandinskyV22Img2ImgPipeline - -[[autodoc]] KandinskyV22Img2ImgPipeline +[[autodoc]] KandinskyPipeline - all - __call__ -### KandinskyV22InpaintPipeline +### KandinskyImg2ImgPipeline -[[autodoc]] KandinskyV22InpaintPipeline +[[autodoc]] KandinskyImg2ImgPipeline - all - __call__ -### KandinskyV22PriorPipeline - -[[autodoc]] ## KandinskyV22PriorPipeline - - all - - __call__ - - interpolate - -### KandinskyV22PriorEmb2EmbPipeline +### KandinskyInpaintPipeline -[[autodoc]] KandinskyV22PriorEmb2EmbPipeline +[[autodoc]] KandinskyInpaintPipeline - all - __call__ - - interpolate -### KandinskyPriorPipeline +### KandinskyCombinedPipeline -[[autodoc]] KandinskyPriorPipeline +[[autodoc]] KandinskyCombinedPipeline - all - __call__ - - interpolate - -### KandinskyPipeline -[[autodoc]] KandinskyPipeline - - all - - __call__ - -### KandinskyImg2ImgPipeline +### KandinskyImg2ImgCombinedPipeline -[[autodoc]] KandinskyImg2ImgPipeline +[[autodoc]] KandinskyImg2ImgCombinedPipeline - all - __call__ -### KandinskyInpaintPipeline +### KandinskyInpaintCombinedPipeline -[[autodoc]] KandinskyInpaintPipeline +[[autodoc]] 
KandinskyInpaintCombinedPipeline - all - __call__ diff --git a/docs/source/en/api/pipelines/kandinsky_v22.mdx b/docs/source/en/api/pipelines/kandinsky_v22.mdx new file mode 100644 index 000000000000..5ffb1ed22724 --- /dev/null +++ b/docs/source/en/api/pipelines/kandinsky_v22.mdx @@ -0,0 +1,342 @@ + + +## Kandinsky 2.2 + +The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1. First, you will need to use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2, all of the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned with only `image_embeds` and `negative_image_embeds`. + +Let's look at an example of how to perform text-to-image generation using Kandinsky 2.2. + +First, let's create the prior pipeline and text-to-image pipeline with Kandinsky 2.2 checkpoints. + +```python +from diffusers import DiffusionPipeline +import torch + +pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16) +pipe_prior.to("cuda") + +t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) +t2i_pipe.to("cuda") +``` + +You can then use `pipe_prior` to generate image embeddings. + +```python +prompt = "portrait of a women, blue eyes, cinematic" +negative_prompt = "low quality, bad quality" + +image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple() +``` + +Now you can pass these embeddings to the text-to-image pipeline. When using Kandinsky 2.2 you don't need to pass the `prompt` (but you do with the previous version, Kandinsky 2.1). + +``` +image = t2i_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[ + 0 +] +image.save("portrait.png") +``` +![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/%20blue%20eyes.png) + +We used the text-to-image pipeline as an example, but the same process applies to all decoding pipelines in Kandinsky 2.2. For more information, please refer to our API section for each pipeline. + +### Text-to-Image Generation with ControlNet Conditioning + +In the following, we give a simple example of how to use [`KandinskyV22ControlnetPipeline`] to add control to the text-to-image generation with a depth image. + +First, let's take an image and extract its depth map. + +```python +from diffusers.utils import load_image + +img = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png" +).resize((768, 768)) +``` +![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png) + +We can use the `depth-estimation` pipeline from transformers to process the image and retrieve its depth map. 
+
+```python
+import torch
+import numpy as np
+
+from transformers import pipeline
+from diffusers.utils import load_image
+
+
+def make_hint(image, depth_estimator):
+    image = depth_estimator(image)["depth"]
+    image = np.array(image)
+    image = image[:, :, None]
+    image = np.concatenate([image, image, image], axis=2)
+    detected_map = torch.from_numpy(image).float() / 255.0
+    hint = detected_map.permute(2, 0, 1)
+    return hint
+
+
+depth_estimator = pipeline("depth-estimation")
+hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
+```
+Now, we load the prior pipeline and the text-to-image controlnet pipeline:
+
+```python
+from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
+
+pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+)
+pipe_prior = pipe_prior.to("cuda")
+
+pipe = KandinskyV22ControlnetPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+```
+
+We pass the prompt and negative prompt through the prior to generate image embeddings:
+
+```python
+prompt = "A robot, 4k photo"
+
+negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
+
+generator = torch.Generator(device="cuda").manual_seed(43)
+image_emb, zero_image_emb = pipe_prior(
+    prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
+).to_tuple()
+```
+
+Now we can pass the image embeddings and the depth image we extracted to the controlnet pipeline. With Kandinsky 2.2, only prior pipelines accept `prompt` input. You do not need to pass the prompt to the controlnet pipeline.
+
+```python
+images = pipe(
+    image_embeds=image_emb,
+    negative_image_embeds=zero_image_emb,
+    hint=hint,
+    num_inference_steps=50,
+    generator=generator,
+    height=768,
+    width=768,
+).images
+
+images[0].save("robot_cat.png")
+```
+
+The output image looks as follows:
+![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat_text2img.png)
+
+### Image-to-Image Generation with ControlNet Conditioning
+
+Kandinsky 2.2 also includes a [`KandinskyV22ControlnetImg2ImgPipeline`] that will allow you to add control to the image generation process with both the image and its depth map. This pipeline works really well with [`KandinskyV22PriorEmb2EmbPipeline`], which generates image embeddings based on both a text prompt and an image.
+
+For our robot cat example, we will pass the prompt and cat image together to the prior pipeline to generate an image embedding. We will then use that image embedding and the depth map of the cat to further control the image generation process.
+
+We can use the same cat image and its depth map from the last example.
+
+```python
+import torch
+import numpy as np
+
+from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline
+from diffusers.utils import load_image
+from transformers import pipeline
+
+img = load_image(
+    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/cat.png"
+).resize((768, 768))
+
+
+def make_hint(image, depth_estimator):
+    image = depth_estimator(image)["depth"]
+    image = np.array(image)
+    image = image[:, :, None]
+    image = np.concatenate([image, image, image], axis=2)
+    detected_map = torch.from_numpy(image).float() / 255.0
+    hint = detected_map.permute(2, 0, 1)
+    return hint
+
+
+depth_estimator = pipeline("depth-estimation")
+hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
+
+pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+)
+pipe_prior = pipe_prior.to("cuda")
+
+pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+
+prompt = "A robot, 4k photo"
+negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
+
+generator = torch.Generator(device="cuda").manual_seed(43)
+
+# run prior pipeline
+
+img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)
+negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)
+
+# run controlnet img2img pipeline
+images = pipe(
+    image=img,
+    strength=0.5,
+    image_embeds=img_emb.image_embeds,
+    negative_image_embeds=negative_emb.image_embeds,
+    hint=hint,
+    num_inference_steps=50,
+    generator=generator,
+    height=768,
+    width=768,
+).images
+
+images[0].save("robot_cat.png")
+```
+
+Here is the output. Compared with the output from our text-to-image controlnet example, it kept a lot more cat facial details from the original image and worked them into the robot style we asked for.
+
+![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat.png)
+
+## Optimization
+
+Running Kandinsky 2.2 in inference requires running both a first prior pipeline: [`KandinskyV22PriorPipeline`]
+and a second image decoding pipeline which is one of [`KandinskyV22Pipeline`], [`KandinskyV22Img2ImgPipeline`], or [`KandinskyV22InpaintPipeline`].
+
+The bulk of the computation time is spent in the second image decoding pipeline, so when looking
+into optimizing the model, that is the pipeline to focus on.
+
+When running with PyTorch < 2.0, we strongly recommend making use of [`xformers`](https://github.com/facebookresearch/xformers)
+to speed up inference. This can be done by simply running:
+
+```py
+from diffusers import DiffusionPipeline
+import torch
+
+t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
+t2i_pipe.enable_xformers_memory_efficient_attention()
+```
+
+When running on PyTorch >= 2.0, PyTorch's SDPA attention will automatically be used. For more information on
+PyTorch's SDPA, feel free to have a look at [this blog post](https://pytorch.org/blog/accelerated-diffusers-pt-20/).
+
+To have explicit control, you can also manually set the pipeline to use PyTorch's 2.0 efficient attention:
+
+```py
+from diffusers.models.attention_processor import AttnAddedKVProcessor2_0
+
+t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
+```
+
+The slowest and most memory-intensive attention processor is the default `AttnAddedKVProcessor` processor.
+We do **not** recommend using it except for testing purposes or cases where highly deterministic behaviour is desired.
+You can set it with:
+
+```py
+from diffusers.models.attention_processor import AttnAddedKVProcessor
+
+t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor())
+```
+
+With PyTorch >= 2.0, you can also use Kandinsky with `torch.compile` which depending
+on your hardware can significantly speed up your inference time once the model is compiled.
+To use Kandinsky with `torch.compile`, you can do:
+
+```py
+t2i_pipe.unet.to(memory_format=torch.channels_last)
+t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=True)
+```
+
+After compilation you should see a very fast inference time. For more information,
+feel free to have a look at [Our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0).
+
+<Tip>
+
+To generate images directly from a single pipeline, you can use [`KandinskyV22CombinedPipeline`], [`KandinskyV22Img2ImgCombinedPipeline`], [`KandinskyV22InpaintCombinedPipeline`].
+These combined pipelines wrap the [`KandinskyV22PriorPipeline`] and [`KandinskyV22Pipeline`], [`KandinskyV22Img2ImgPipeline`], [`KandinskyV22InpaintPipeline`] respectively into a single
+pipeline for a simpler user experience.
+
+</Tip>
+
+## Available Pipelines:
+
+| Pipeline | Tasks |
+|---|---|
+| [pipeline_kandinsky2_2.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py) | *Text-to-Image Generation* |
+| [pipeline_kandinsky2_2_combined.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py) | *End-to-End Text-to-Image, Image-to-Image, Inpainting Generation* |
+| [pipeline_kandinsky2_2_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_controlnet.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_controlnet_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py) | *Image-Guided Image Generation* |
+
+
+### KandinskyV22Pipeline
+
+[[autodoc]] KandinskyV22Pipeline
+    - all
+    - __call__
+
+### KandinskyV22ControlnetPipeline
+
+[[autodoc]] KandinskyV22ControlnetPipeline
+    - all
+    - __call__
+
+### KandinskyV22ControlnetImg2ImgPipeline
+
+[[autodoc]] KandinskyV22ControlnetImg2ImgPipeline
+    - all
+    - __call__
+
+### KandinskyV22Img2ImgPipeline
+
+[[autodoc]] KandinskyV22Img2ImgPipeline
+    - all
+    - __call__
+
+### KandinskyV22InpaintPipeline
+
+[[autodoc]] KandinskyV22InpaintPipeline + - all + - __call__ + +### KandinskyV22PriorPipeline + +[[autodoc]] ## KandinskyV22PriorPipeline + - all + - __call__ + - interpolate + +### KandinskyV22PriorEmb2EmbPipeline + +[[autodoc]] KandinskyV22PriorEmb2EmbPipeline + - all + - __call__ + - interpolate + +### KandinskyV22CombinedPipeline + +[[autodoc]] KandinskyV22CombinedPipeline + - all + - __call__ + +### KandinskyV22Img2ImgCombinedPipeline + +[[autodoc]] KandinskyV22Img2ImgCombinedPipeline + - all + - __call__ + +### KandinskyV22InpaintCombinedPipeline + +[[autodoc]] KandinskyV22InpaintCombinedPipeline + - all + - __call__ diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index df7cf9f189fc..66d306720aaf 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -86,19 +86,19 @@ ] ) -AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( +_AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyPipeline), ("kandinsky22", KandinskyV22Pipeline), ] ) -AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( +_AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyImg2ImgPipeline), ("kandinsky22", KandinskyV22Img2ImgPipeline), ] ) -AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( +_AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyInpaintPipeline), ("kandinsky22", KandinskyV22InpaintPipeline), @@ -109,23 +109,23 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, - AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, - AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, - AUTO_INPAINT_DECODER_PIPELINES_MAPPING, + _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_INPAINT_DECODER_PIPELINES_MAPPING, ] def _get_connected_pipeline(pipeline_cls): # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder - if pipeline_cls in AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): + if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False ) - if pipeline_cls in AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): + if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False ) - if pipeline_cls in AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): + if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) diff --git a/src/diffusers/pipelines/kandinsky2_2/__init__.py b/src/diffusers/pipelines/kandinsky2_2/__init__.py index 49326964d874..4997a2e4056b 100644 --- a/src/diffusers/pipelines/kandinsky2_2/__init__.py +++ b/src/diffusers/pipelines/kandinsky2_2/__init__.py @@ -12,14 +12,14 @@ from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_kandinsky2_2 import KandinskyV22Pipeline + from .pipeline_kandinsky2_2_combined import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, + ) from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline from .pipeline_kandinsky2_2_controlnet_img2img import 
KandinskyV22ControlnetImg2ImgPipeline from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline - from .pipeline_kandinsky2_2_combined import ( - KandinskyV22CombinedPipeline, - KandinskyV22Img2ImgCombinedPipeline, - KandinskyV22InpaintCombinedPipeline, - ) \ No newline at end of file From c822e0705c2960943ac4b2a4bac6a53b4f92110e Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 13:35:50 +0200 Subject: [PATCH 20/24] docs --- docs/source/en/api/pipelines/kandinsky_v22.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/api/pipelines/kandinsky_v22.mdx b/docs/source/en/api/pipelines/kandinsky_v22.mdx index 5ffb1ed22724..caa21925be18 100644 --- a/docs/source/en/api/pipelines/kandinsky_v22.mdx +++ b/docs/source/en/api/pipelines/kandinsky_v22.mdx @@ -7,7 +7,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -## Kandinsky 2.2 +# Kandinsky 2.2 The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1. First, you will need to use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2, all of the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned with only `image_embeds` and `negative_image_embeds`. 
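The combined pipelines added in this series collapse the two-stage prior/decoder workflow described above into a single call. The following is a minimal sketch of direct usage, not a snippet from the patches: it assumes the `kandinsky-community/kandinsky-2-2-decoder` checkpoint and that, because the combined classes set `_load_connected_pipes = True`, `from_pretrained` also resolves the connected prior weights; the prompt and file name are illustrative.

```py
import torch

from diffusers import KandinskyV22CombinedPipeline

pipe = KandinskyV22CombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

# A single call runs the prior (text -> image embeddings) and then the
# decoder (image embeddings -> pixels), so no manual handling of
# image_embeds / negative_image_embeds is needed.
image = pipe(
    prompt="a highly detailed painting of a fox in a snowy forest",
    negative_prompt="low quality, bad quality",
    num_inference_steps=50,
    prior_guidance_scale=4.0,
    height=768,
    width=768,
).images[0]
image.save("fox.png")
```

The same pattern applies to the image-to-image and inpainting variants via [`KandinskyV22Img2ImgCombinedPipeline`] and [`KandinskyV22InpaintCombinedPipeline`], or indirectly through the `AutoPipelineFor*` helpers shown in the docstrings added by the next patch.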
From 3300b7e7cfedd198b5437eb475da45b42b04425f Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Wed, 26 Jul 2023 15:22:18 +0200
Subject: [PATCH 21/24] correct docs

---
 .../source/en/api/pipelines/kandinsky_v22.mdx |   2 +-
 .../kandinsky/pipeline_kandinsky_combined.py  | 104 ++++++++++++++++-
 .../kandinsky2_2/pipeline_kandinsky2_2.py     |   3 +-
 .../pipeline_kandinsky2_2_combined.py         | 108 +++++++++++++++++-
 .../pipeline_kandinsky2_2_inpainting.py       |   3 +-
 5 files changed, 212 insertions(+), 8 deletions(-)

diff --git a/docs/source/en/api/pipelines/kandinsky_v22.mdx b/docs/source/en/api/pipelines/kandinsky_v22.mdx
index caa21925be18..074bc5b8d64c 100644
--- a/docs/source/en/api/pipelines/kandinsky_v22.mdx
+++ b/docs/source/en/api/pipelines/kandinsky_v22.mdx
@@ -311,7 +311,7 @@ pipeline for a simpler user experience.

 ### KandinskyV22PriorPipeline

-[[autodoc]] ## KandinskyV22PriorPipeline
+[[autodoc]] KandinskyV22PriorPipeline
     - all
     - __call__
     - interpolate
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
index 8edd7c3411be..47725d920c3a 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
@@ -39,7 +39,72 @@
 TEXT2IMAGE_EXAMPLE_DOC_STRING = """
     Examples:
     ```py
+        from diffusers import AutoPipelineForText2Image
+        import torch
+
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
+        )
+
+        prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
+
+        image = pipe(prompt=prompt, num_inference_steps=25).images[0]
+    ```
+"""
+
+IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
+    Examples:
+    ```py
+        from diffusers import AutoPipelineForImage2Image
+        import torch
+        import requests
+        from io import BytesIO
+        from PIL import Image
+        import os
+
+        pipe = AutoPipelineForImage2Image.from_pretrained(
+            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
+        )
+        pipe.enable_model_cpu_offload()
+
+        prompt = "A fantasy landscape, Cinematic lighting"
+        negative_prompt = "low quality, bad quality"
+
+        url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+        response = requests.get(url)
+        original_image = Image.open(BytesIO(response.content)).convert("RGB")
+        original_image.thumbnail((768, 768))
+
+        image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
+    ```
+"""
+
+INPAINT_EXAMPLE_DOC_STRING = """
+    Examples:
+    ```py
+        from diffusers import AutoPipelineForInpainting
+        from diffusers.utils import load_image
+        import torch
+        import numpy as np
+
+        pipe = AutoPipelineForInpainting.from_pretrained(
+            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
+        )
+        pipe.enable_model_cpu_offload()
+
+        prompt = "A fantasy landscape, Cinematic lighting"
+        negative_prompt = "low quality, bad quality"
+
+        original_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
+        )
+
+        mask = np.zeros((768, 768), dtype=np.float32)
+        # Let's mask out an area above the cat's head
+        mask[:250, 250:-250] = 1
+
+        image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
+    ```
 """
@@ -62,6 +127,17 @@ class KandinskyCombinedPipeline(DiffusionPipeline):
             Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
     """

     _load_connected_pipes = True
@@ -261,6 +337,17 @@ class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
             Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
     """

     _load_connected_pipes = True
@@ -330,7 +417,7 @@ def set_progress_bar_config(self, **kwargs):
         self.decoder_pipe.set_progress_bar_config(**kwargs)

     @torch.no_grad()
-    @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
+    @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
     def __call__(
         self,
         prompt: Union[str, List[str]],
@@ -466,7 +553,7 @@ def __call__(

 class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
     """
-    Combined Pipeline for image-to-image generation using Kandinsky
+    Combined Pipeline for inpainting generation using Kandinsky

     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
@@ -482,6 +569,17 @@ class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
         Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
""" _load_connected_pipes = True @@ -551,7 +649,7 @@ def set_progress_bar_config(self, **kwargs): self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() - @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index 5e5b83bfa995..ccbdae09dc08 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -153,8 +153,9 @@ def __call__( return_dict: bool = True, ): """ - Args: Function invoked when calling the pipeline for generation. + + Args: image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index 75bd711dd946..380eef8c6388 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -36,7 +36,72 @@ TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py + from diffusers import AutoPipelineForText2Image + import torch + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" + + image = pipe(prompt=prompt, num_inference_steps=25).images[0] + ``` +""" + +IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForImage2Image + import torch + import requests + from io import BytesIO + from PIL import Image + import os + + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + image.thumbnail((768, 768)) + + image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] + ``` +""" + +INPAINT_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForInpainting + from diffusers.utils import load_image + import torch + import numpy as np + + pipe = AutoPipelineForInpainting.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + original_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" + ) + + mask = np.zeros((768, 768), dtype=np.float32) + # Let's mask out an area above the cat's head + mask[:250, 250:-250] = 1 + + image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] ``` """ @@ -55,6 +120,19 @@ class 
         Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor to be used to preprocess images for the CLIP image encoder.
     """

     _load_connected_pipes = True
@@ -243,6 +321,19 @@ class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline):
         Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor to be used to preprocess images for the CLIP image encoder.
     """

     _load_connected_pipes = True
@@ -306,7 +397,7 @@ def set_progress_bar_config(self, **kwargs):
         self.decoder_pipe.set_progress_bar_config(**kwargs)

     @torch.no_grad()
-    @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
+    @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
     def __call__(
         self,
         prompt: Union[str, List[str]],
@@ -453,6 +544,19 @@ class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
         Conditional U-Net architecture to denoise the image embedding.
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
+        prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+            Frozen image-encoder.
+        prior_text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        prior_tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor to be used to preprocess images for the CLIP image encoder.
""" _load_connected_pipes = True @@ -516,7 +620,7 @@ def set_progress_bar_config(self, **kwargs): self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() - @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index f02a78f9b044..6dcf62ca96aa 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -325,8 +325,9 @@ def __call__( return_dict: bool = True, ): """ - Args: Function invoked when calling the pipeline for generation. + + Args: image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. image (`PIL.Image.Image`): From 4f9e3b278b6a9c59cdb87eb6b811c4ab1f357813 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 16:04:00 +0200 Subject: [PATCH 22/24] fix tests --- .../kandinsky/pipeline_kandinsky_prior.py | 2 +- test_corrections.txt | 12 ----------- .../kandinsky/test_kandinsky_combined.py | 20 +++++++++++++++---- .../kandinsky_v22/test_kandinsky_combined.py | 20 +++++++++++++++---- .../kandinsky_v22/test_kandinsky_img2img.py | 3 ++- 5 files changed, 35 insertions(+), 22 deletions(-) delete mode 100644 test_corrections.txt diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index 947b6bfd0f8b..57d8c7beb97a 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -181,7 +181,7 @@ def interpolate( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, negative_prior_prompt: Optional[str] = None, - negative_prompt: Union[str] = "", + negative_prompt: str = "", guidance_scale: float = 4.0, device=None, ): diff --git a/test_corrections.txt b/test_corrections.txt deleted file mode 100644 index 3fda47895c33..000000000000 --- a/test_corrections.txt +++ /dev/null @@ -1,12 +0,0 @@ -tests/pipelines/kandinsky_v22/test_kandinsky.py;KandinskyV22PipelineFastTests;test_kandinsky;expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineCombinedFastTests;test_kandinsky;expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineImg2ImgCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineInpaintCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4946, 0.4839, 0.4792, 0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) -tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py;KandinskyV22Img2ImgPipelineFastTests;test_kandinsky_img2img;expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) -tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py;KandinskyV22InpaintPipelineFastTests;test_kandinsky_inpaint;expected_slice = 
np.array([0.5008, 0.4897, 0.4887, 0.4937, 0.4808, 0.4936, 0.4739, 0.4703, 0.4828]) -tests/pipelines/kandinsky_v22/test_kandinsky.py;KandinskyV22PipelineFastTests;test_kandinsky;expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineCombinedFastTests;test_kandinsky;expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineImg2ImgCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) -tests/pipelines/kandinsky_v22/test_kandinsky_combined.py;KandinskyV22PipelineInpaintCombinedFastTests;test_kandinsky;expected_slice = np.array([0.4946, 0.4839, 0.4792, 0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) -tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py;KandinskyV22Img2ImgPipelineFastTests;test_kandinsky_img2img;expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) -tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py;KandinskyV22InpaintPipelineFastTests;test_kandinsky_inpaint;expected_slice = np.array([0.5008, 0.4897, 0.4887, 0.4937, 0.4808, 0.4936, 0.4739, 0.4703, 0.4828]) diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index c42a7f351319..491c2d4b827b 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -19,7 +19,7 @@ from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline from diffusers.utils import torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu +from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies @@ -92,10 +92,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.1579, 0.2966, 0.3703, 0.1633, 0.2813, 0.3113, 0.6867, 0.6880, 0.4141]) + expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -131,6 +132,9 @@ def test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyImg2ImgCombinedPipeline @@ -189,10 +193,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.4451, 0.4793, 0.4002, 0.4480, 0.4170, 0.4208, 0.5097, 0.5002, 0.4693]) + expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -228,6 +233,9 @@ def 
test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyInpaintCombinedPipeline @@ -286,10 +294,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.0764, 0.1887, 0.0870, 0.0000, 0.0000, 0.0765, 0.3652, 0.4669, 0.4194]) + expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -324,3 +333,6 @@ def test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py index b3bf0ceba9f4..2aed8a64074a 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py @@ -23,7 +23,7 @@ KandinskyV22InpaintCombinedPipeline, ) from diffusers.utils import torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu +from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies @@ -96,10 +96,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.0349, 0.2685, 0.2658, 0.1627, 0.0229, 0.0680, 0.6497, 0.5459, 0.4795]) + expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -135,6 +136,9 @@ def test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgCombinedPipeline @@ -193,10 +197,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.4680, 0.5258, 0.5191, 0.4720, 0.4972, 0.4856, 0.4958, 0.4713, 0.4833]) + expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -232,6 +237,9 @@ def test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + def test_inference_batch_single_identical(self): + 
super().test_inference_batch_single_identical(expected_max_diff=1e-2) + class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintCombinedPipeline @@ -290,10 +298,11 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.4946, 0.4839, 0.4792, 0.4892, 0.4759, 0.4842, 0.4729, 0.4696, 0.4774]) + expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @@ -328,3 +337,6 @@ def test_offloads(self): assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py index 17f27d0d7804..6a35f6629564 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py @@ -29,7 +29,7 @@ VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu +from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference @@ -214,6 +214,7 @@ def test_kandinsky_img2img(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) From 7f038f522d5c42483e4addcd25f472af3aaf4853 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 16:52:48 +0200 Subject: [PATCH 23/24] add warning --- .../kandinsky/pipeline_kandinsky_inpaint.py | 15 +++++++++++++++ .../pipeline_kandinsky2_2_inpainting.py | 16 ++++++++++++++++ .../kandinsky/test_kandinsky_combined.py | 5 +---- .../kandinsky_v22/test_kandinsky_combined.py | 5 +---- .../kandinsky_v22/test_kandinsky_img2img.py | 3 +-- .../kandinsky_v22/test_kandinsky_inpaint.py | 2 -- 6 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index f12e936243d0..dda0c3faa7fd 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -19,11 +19,13 @@ import PIL import torch import torch.nn.functional as F +from packaging import version from PIL import Image from transformers import ( XLMRobertaTokenizer, ) +from ... 
import __version__
 from ...models import UNet2DConditionModel, VQModel
 from ...schedulers import DDIMScheduler
 from ...utils import (
@@ -275,6 +277,7 @@ def __init__(
             scheduler=scheduler,
         )
         self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
+        self._warn_has_been_called = False

     # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
     def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
@@ -500,6 +503,18 @@ def __call__(
         Returns:
             [`~pipelines.ImagePipelineOutput`] or `tuple`
         """
+        if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse(
+            "0.22.0.dev0"
+        ):
+            logger.warn(
+                "Please note that the expected format of `mask_image` has recently been changed. "
+                "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. "
+                "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. "
+                "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. "
+                "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. "
+                "This warning will be suppressed after the first inference call and will be removed in diffusers>0.22.0"
+            )
+            self._warn_has_been_called = True

         # Define call parameters
         if isinstance(prompt, str):
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
index 6dcf62ca96aa..2e0a0d833740 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
@@ -19,8 +19,10 @@
 import PIL
 import torch
 import torch.nn.functional as F
+from packaging import version
 from PIL import Image

+from ... import __version__
 from ...models import UNet2DConditionModel, VQModel
 from ...schedulers import DDPMScheduler
 from ...utils import (
@@ -265,6 +267,7 @@ def __init__(
             movq=movq,
         )
         self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
+        self._warn_has_been_called = False

     # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
     def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
@@ -379,6 +382,19 @@ def __call__(
         Returns:
             [`~pipelines.ImagePipelineOutput`] or `tuple`
         """
+        if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse(
+            "0.22.0.dev0"
+        ):
+            logger.warn(
+                "Please note that the expected format of `mask_image` has recently been changed. "
+                "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. "
+                "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. "
+                "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. "
+                "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
" + "This warning will be surpressed after the first inference call and will be removed in diffusers>0.22.0" + ) + self._warn_has_been_called = True + device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index 491c2d4b827b..21c8e78cfade 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -19,7 +19,7 @@ from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline from diffusers.utils import torch_device -from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu +from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies @@ -92,7 +92,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) @@ -193,7 +192,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) @@ -294,7 +292,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py index 2aed8a64074a..666ea30bd0fd 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py @@ -23,7 +23,7 @@ KandinskyV22InpaintCombinedPipeline, ) from diffusers.utils import torch_device -from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu +from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies @@ -96,7 +96,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) @@ -197,7 +196,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) @@ -298,7 +296,6 @@ def test_kandinsky(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py index 6a35f6629564..17f27d0d7804 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py @@ -29,7 +29,7 @@ VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, print_tensor_test, require_torch_gpu +from diffusers.utils.testing_utils import 
enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference @@ -214,7 +214,6 @@ def test_kandinsky_img2img(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print_tensor_test(image_from_tuple_slice) assert image.shape == (1, 64, 64, 3) diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py index e08e2c08f2d9..436c240e1ac8 100644 --- a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py @@ -217,8 +217,6 @@ def test_kandinsky_inpaint(self): image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - print(f"image.shape {image.shape}") - assert image.shape == (1, 64, 64, 3) expected_slice = np.array( From 1acbcbed4d3d53b4c8b2de5764de5a4b2090870b Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 26 Jul 2023 16:53:40 +0200 Subject: [PATCH 24/24] correct docs --- src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py | 1 + .../pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 47725d920c3a..c7f439fbabb6 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -45,6 +45,7 @@ pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) + pipe.enable_model_cpu_offload() prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index 380eef8c6388..977a82fdbc9f 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -42,6 +42,7 @@ pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) + pipe.enable_model_cpu_offload() prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
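To make the `mask_image` format change announced in the warnings of PATCH 23 concrete, the conversion users need is a simple inversion. The following sketch is illustrative only and is not part of the patches above; it assumes a float mask written against the pre-0.19.0 Kandinsky convention:

```py
import numpy as np

# Pre-0.19.0 Kandinsky convention: black (0) pixels were repainted and
# white (1) pixels were preserved.
old_mask = np.ones((768, 768), dtype=np.float32)
old_mask[:250, 250:-250] = 0  # region to repaint under the old convention

# diffusers >= 0.19.0 convention (aligned with Stable Diffusion):
# white (1) pixels are repainted and black (0) pixels are preserved.
new_mask = 1 - old_mask
```

`new_mask` can then be passed as `mask_image` to the Kandinsky inpainting pipelines so that the same region is repainted under the new convention.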