diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index 53718f8d0e7a..2a9de8b3f24b 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -206,6 +206,8 @@
title: InstructPix2Pix
- local: api/pipelines/kandinsky
title: Kandinsky
+ - local: api/pipelines/kandinsky_v22
+ title: Kandinsky 2.2
- local: api/pipelines/latent_diffusion
title: Latent Diffusion
- local: api/pipelines/panorama
diff --git a/docs/source/en/api/pipelines/kandinsky.mdx b/docs/source/en/api/pipelines/kandinsky.mdx
index 6b6c64a08951..79c602b8bc07 100644
--- a/docs/source/en/api/pipelines/kandinsky.mdx
+++ b/docs/source/en/api/pipelines/kandinsky.mdx
@@ -212,9 +212,9 @@ init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
-mask = np.ones((768, 768), dtype=np.float32)
+mask = np.zeros((768, 768), dtype=np.float32)
# Let's mask out an area above the cat's head
-mask[:250, 250:-250] = 0
+mask[:250, 250:-250] = 1
out = pipe(
prompt,
@@ -276,208 +276,6 @@ image.save("starry_cat.png")
```

-
-### Text-to-Image Generation with ControlNet Conditioning
-
-In the following, we give a simple example of how to use [`KandinskyV22ControlnetPipeline`] to add control to the text-to-image generation with a depth image.
-
-First, let's take an image and extract its depth map.
-
-```python
-from diffusers.utils import load_image
-
-img = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"
-).resize((768, 768))
-```
-
-
-We can use the `depth-estimation` pipeline from transformers to process the image and retrieve its depth map.
-
-```python
-import torch
-import numpy as np
-
-from transformers import pipeline
-from diffusers.utils import load_image
-
-
-def make_hint(image, depth_estimator):
- image = depth_estimator(image)["depth"]
- image = np.array(image)
- image = image[:, :, None]
- image = np.concatenate([image, image, image], axis=2)
- detected_map = torch.from_numpy(image).float() / 255.0
- hint = detected_map.permute(2, 0, 1)
- return hint
-
-
-depth_estimator = pipeline("depth-estimation")
-hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
-```
-Now, we load the prior pipeline and the text-to-image controlnet pipeline
-
-```python
-from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
-
-pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
- "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
-)
-pipe_prior = pipe_prior.to("cuda")
-
-pipe = KandinskyV22ControlnetPipeline.from_pretrained(
- "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
-)
-pipe = pipe.to("cuda")
-```
-
-We pass the prompt and negative prompt through the prior to generate image embeddings
-
-```python
-prompt = "A robot, 4k photo"
-
-negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-
-generator = torch.Generator(device="cuda").manual_seed(43)
-image_emb, zero_image_emb = pipe_prior(
- prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
-).to_tuple()
-```
-
-Now we can pass the image embeddings and the depth image we extracted to the controlnet pipeline. With Kandinsky 2.2, only prior pipelines accept `prompt` input. You do not need to pass the prompt to the controlnet pipeline.
-
-```python
-images = pipe(
- image_embeds=image_emb,
- negative_image_embeds=zero_image_emb,
- hint=hint,
- num_inference_steps=50,
- generator=generator,
- height=768,
- width=768,
-).images
-
-images[0].save("robot_cat.png")
-```
-
-The output image looks as follow:
-
-
-### Image-to-Image Generation with ControlNet Conditioning
-
-Kandinsky 2.2 also includes a [`KandinskyV22ControlnetImg2ImgPipeline`] that will allow you to add control to the image generation process with both the image and its depth map. This pipeline works really well with [`KandinskyV22PriorEmb2EmbPipeline`], which generates image embeddings based on both a text prompt and an image.
-
-For our robot cat example, we will pass the prompt and cat image together to the prior pipeline to generate an image embedding. We will then use that image embedding and the depth map of the cat to further control the image generation process.
-
-We can use the same cat image and its depth map from the last example.
-
-```python
-import torch
-import numpy as np
-
-from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline
-from diffusers.utils import load_image
-from transformers import pipeline
-
-img = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/cat.png"
-).resize((768, 768))
-
-
-def make_hint(image, depth_estimator):
- image = depth_estimator(image)["depth"]
- image = np.array(image)
- image = image[:, :, None]
- image = np.concatenate([image, image, image], axis=2)
- detected_map = torch.from_numpy(image).float() / 255.0
- hint = detected_map.permute(2, 0, 1)
- return hint
-
-
-depth_estimator = pipeline("depth-estimation")
-hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
-
-pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
- "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
-)
-pipe_prior = pipe_prior.to("cuda")
-
-pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
- "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
-)
-pipe = pipe.to("cuda")
-
-prompt = "A robot, 4k photo"
-negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-
-generator = torch.Generator(device="cuda").manual_seed(43)
-
-# run prior pipeline
-
-img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)
-negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)
-
-# run controlnet img2img pipeline
-images = pipe(
- image=img,
- strength=0.5,
- image_embeds=img_emb.image_embeds,
- negative_image_embeds=negative_emb.image_embeds,
- hint=hint,
- num_inference_steps=50,
- generator=generator,
- height=768,
- width=768,
-).images
-
-images[0].save("robot_cat.png")
-```
-
-Here is the output. Compared with the output from our text-to-image controlnet example, it kept a lot more cat facial details from the original image and worked into the robot style we asked for.
-
-
-
-## Kandinsky 2.2
-
-The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1. First, you will need to use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2, all of the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned with only `image_embeds` and `negative_image_embeds`.
-
-Let's look at an example of how to perform text-to-image generation using Kandinsky 2.2.
-
-First, let's create the prior pipeline and text-to-image pipeline with Kandinsky 2.2 checkpoints.
-
-```python
-from diffusers import DiffusionPipeline
-import torch
-
-pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
-pipe_prior.to("cuda")
-
-t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
-t2i_pipe.to("cuda")
-```
-
-You can then use `pipe_prior` to generate image embeddings.
-
-```python
-prompt = "portrait of a women, blue eyes, cinematic"
-negative_prompt = "low quality, bad quality"
-
-image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
-```
-
-Now you can pass these embeddings to the text-to-image pipeline. When using Kandinsky 2.2 you don't need to pass the `prompt` (but you do with the previous version, Kandinsky 2.1).
-
-```
-image = t2i_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[
- 0
-]
-image.save("portrait.png")
-```
-
-
-We used the text-to-image pipeline as an example, but the same process applies to all decoding pipelines in Kandinsky 2.2. For more information, please refer to our API section for each pipeline.
-
-
## Optimization
Running Kandinsky in inference requires running both a first prior pipeline: [`KandinskyPriorPipeline`]
@@ -530,85 +328,63 @@ t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=T
After compilation you should see a very fast inference time. For more information,
feel free to have a look at [Our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0).
+
+
+To generate images directly from a single pipeline, you can use [`KandinskyCombinedPipeline`], [`KandinskyImg2ImgCombinedPipeline`], and [`KandinskyInpaintCombinedPipeline`].
+These combined pipelines wrap the [`KandinskyPriorPipeline`] together with the [`KandinskyPipeline`], [`KandinskyImg2ImgPipeline`], and [`KandinskyInpaintPipeline`] respectively into a single
+pipeline for a simpler user experience.
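+
+A minimal sketch of what this looks like for text-to-image generation, loading the combined pipeline through [`AutoPipelineForText2Image`] (which resolves Kandinsky checkpoints to [`KandinskyCombinedPipeline`]); the prompt below is only illustrative:
+
+```python
+from diffusers import AutoPipelineForText2Image
+import torch
+
+# the combined pipeline runs the prior and the decoder in a single call
+pipe = AutoPipelineForText2Image.from_pretrained(
+    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
+)
+pipe.enable_model_cpu_offload()
+
+prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
+image = pipe(prompt=prompt, num_inference_steps=25).images[0]
+image.save("lion.png")
+```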
+
+
+
## Available Pipelines:
| Pipeline | Tasks |
|---|---|
-| [pipeline_kandinsky2_2.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py) | *Text-to-Image Generation* |
| [pipeline_kandinsky.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py) | *Text-to-Image Generation* |
-| [pipeline_kandinsky2_2_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky_combined.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py) | *End-to-end Text-to-Image, Image-to-Image, and Inpainting Generation* |
| [pipeline_kandinsky_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py) | *Image-Guided Image Generation* |
-| [pipeline_kandinsky2_2_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py) | *Image-Guided Image Generation* |
| [pipeline_kandinsky_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py) | *Image-Guided Image Generation* |
-| [pipeline_kandinsky2_2_controlnet.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py) | *Image-Guided Image Generation* |
-| [pipeline_kandinsky2_2_controlnet_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py) | *Image-Guided Image Generation* |
-
-
-### KandinskyV22Pipeline
-
-[[autodoc]] KandinskyV22Pipeline
- - all
- - __call__
-
-### KandinskyV22ControlnetPipeline
-[[autodoc]] KandinskyV22ControlnetPipeline
- - all
- - __call__
-### KandinskyV22ControlnetImg2ImgPipeline
+### KandinskyPriorPipeline
-[[autodoc]] KandinskyV22ControlnetImg2ImgPipeline
+[[autodoc]] KandinskyPriorPipeline
- all
- __call__
+ - interpolate
+
+### KandinskyPipeline
-### KandinskyV22Img2ImgPipeline
-
-[[autodoc]] KandinskyV22Img2ImgPipeline
+[[autodoc]] KandinskyPipeline
- all
- __call__
-### KandinskyV22InpaintPipeline
+### KandinskyImg2ImgPipeline
-[[autodoc]] KandinskyV22InpaintPipeline
+[[autodoc]] KandinskyImg2ImgPipeline
- all
- __call__
-### KandinskyV22PriorPipeline
-
-[[autodoc]] ## KandinskyV22PriorPipeline
- - all
- - __call__
- - interpolate
-
-### KandinskyV22PriorEmb2EmbPipeline
+### KandinskyInpaintPipeline
-[[autodoc]] KandinskyV22PriorEmb2EmbPipeline
+[[autodoc]] KandinskyInpaintPipeline
- all
- __call__
- - interpolate
-### KandinskyPriorPipeline
+### KandinskyCombinedPipeline
-[[autodoc]] KandinskyPriorPipeline
+[[autodoc]] KandinskyCombinedPipeline
- all
- __call__
- - interpolate
-
-### KandinskyPipeline
-[[autodoc]] KandinskyPipeline
- - all
- - __call__
-
-### KandinskyImg2ImgPipeline
+### KandinskyImg2ImgCombinedPipeline
-[[autodoc]] KandinskyImg2ImgPipeline
+[[autodoc]] KandinskyImg2ImgCombinedPipeline
- all
- __call__
-### KandinskyInpaintPipeline
+### KandinskyInpaintCombinedPipeline
-[[autodoc]] KandinskyInpaintPipeline
+[[autodoc]] KandinskyInpaintCombinedPipeline
- all
- __call__
diff --git a/docs/source/en/api/pipelines/kandinsky_v22.mdx b/docs/source/en/api/pipelines/kandinsky_v22.mdx
new file mode 100644
index 000000000000..074bc5b8d64c
--- /dev/null
+++ b/docs/source/en/api/pipelines/kandinsky_v22.mdx
@@ -0,0 +1,342 @@
+
+
+# Kandinsky 2.2
+
+The Kandinsky 2.2 release includes robust new text-to-image models that support text-to-image generation, image-to-image generation, image interpolation, and text-guided image inpainting. The general workflow to perform these tasks using Kandinsky 2.2 is the same as in Kandinsky 2.1: first, use a prior pipeline to generate image embeddings based on your text prompt, and then use one of the image decoding pipelines to generate the output image. The only difference is that in Kandinsky 2.2 the decoding pipelines no longer accept the `prompt` input, and the image generation process is conditioned only on `image_embeds` and `negative_image_embeds`.
+
+Let's look at an example of how to perform text-to-image generation using Kandinsky 2.2.
+
+First, let's create the prior pipeline and text-to-image pipeline with Kandinsky 2.2 checkpoints.
+
+```python
+from diffusers import DiffusionPipeline
+import torch
+
+pipe_prior = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
+pipe_prior.to("cuda")
+
+t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
+t2i_pipe.to("cuda")
+```
+
+You can then use `pipe_prior` to generate image embeddings.
+
+```python
+prompt = "portrait of a women, blue eyes, cinematic"
+negative_prompt = "low quality, bad quality"
+
+image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
+```
+
+Now you can pass these embeddings to the text-to-image pipeline. When using Kandinsky 2.2, you don't need to pass the `prompt` (but you do with the previous version, Kandinsky 2.1).
+
+```python
+image = t2i_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[
+ 0
+]
+image.save("portrait.png")
+```
+
+
+We used the text-to-image pipeline as an example, but the same process applies to all decoding pipelines in Kandinsky 2.2. For more information, please refer to our API section for each pipeline.
+
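+For instance, a rough sketch of the same two-stage workflow with the inpainting decoder could look like the following; the `kandinsky-community/kandinsky-2-2-decoder-inpaint` checkpoint, the example prompt, and the mask convention (white pixels mark the area to repaint) are assumptions borrowed from the Kandinsky 2.1 inpainting example:
+
+```python
+import numpy as np
+import torch
+
+from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
+from diffusers.utils import load_image
+
+pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+).to("cuda")
+pipe = KandinskyV22InpaintPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
+).to("cuda")
+
+prompt = "a hat"
+image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()
+
+init_image = load_image(
+    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
+)
+
+# mask out an area above the cat's head (1.0 = area to repaint)
+mask = np.zeros((768, 768), dtype=np.float32)
+mask[:250, 250:-250] = 1
+
+image = pipe(
+    image=init_image,
+    mask_image=mask,
+    image_embeds=image_embeds,
+    negative_image_embeds=negative_image_embeds,
+    height=768,
+    width=768,
+    num_inference_steps=50,
+).images[0]
+image.save("cat_with_hat.png")
+```
+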
+### Text-to-Image Generation with ControlNet Conditioning
+
+In the following, we give a simple example of how to use [`KandinskyV22ControlnetPipeline`] to add control to the text-to-image generation with a depth image.
+
+First, let's take an image and extract its depth map.
+
+```python
+from diffusers.utils import load_image
+
+img = load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"
+).resize((768, 768))
+```
+
+
+We can use the `depth-estimation` pipeline from transformers to process the image and retrieve its depth map.
+
+```python
+import torch
+import numpy as np
+
+from transformers import pipeline
+from diffusers.utils import load_image
+
+
+def make_hint(image, depth_estimator):
+ image = depth_estimator(image)["depth"]
+ image = np.array(image)
+ image = image[:, :, None]
+ image = np.concatenate([image, image, image], axis=2)
+ detected_map = torch.from_numpy(image).float() / 255.0
+ hint = detected_map.permute(2, 0, 1)
+ return hint
+
+
+depth_estimator = pipeline("depth-estimation")
+hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
+```
+
+Now, we load the prior pipeline and the text-to-image controlnet pipeline:
+
+```python
+from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
+
+pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+)
+pipe_prior = pipe_prior.to("cuda")
+
+pipe = KandinskyV22ControlnetPipeline.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+```
+
+We pass the prompt and negative prompt through the prior to generate image embeddings:
+
+```python
+prompt = "A robot, 4k photo"
+
+negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
+
+generator = torch.Generator(device="cuda").manual_seed(43)
+image_emb, zero_image_emb = pipe_prior(
+ prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
+).to_tuple()
+```
+
+Now we can pass the image embeddings and the depth image we extracted to the controlnet pipeline. With Kandinsky 2.2, only prior pipelines accept `prompt` input. You do not need to pass the prompt to the controlnet pipeline.
+
+```python
+images = pipe(
+ image_embeds=image_emb,
+ negative_image_embeds=zero_image_emb,
+ hint=hint,
+ num_inference_steps=50,
+ generator=generator,
+ height=768,
+ width=768,
+).images
+
+images[0].save("robot_cat.png")
+```
+
+The output image looks as follows:
+
+
+### Image-to-Image Generation with ControlNet Conditioning
+
+Kandinsky 2.2 also includes a [`KandinskyV22ControlnetImg2ImgPipeline`] that will allow you to add control to the image generation process with both the image and its depth map. This pipeline works really well with [`KandinskyV22PriorEmb2EmbPipeline`], which generates image embeddings based on both a text prompt and an image.
+
+For our robot cat example, we will pass the prompt and cat image together to the prior pipeline to generate an image embedding. We will then use that image embedding and the depth map of the cat to further control the image generation process.
+
+We can use the same cat image and its depth map from the last example.
+
+```python
+import torch
+import numpy as np
+
+from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline
+from diffusers.utils import load_image
+from transformers import pipeline
+
+img = load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/cat.png"
+).resize((768, 768))
+
+
+def make_hint(image, depth_estimator):
+ image = depth_estimator(image)["depth"]
+ image = np.array(image)
+ image = image[:, :, None]
+ image = np.concatenate([image, image, image], axis=2)
+ detected_map = torch.from_numpy(image).float() / 255.0
+ hint = detected_map.permute(2, 0, 1)
+ return hint
+
+
+depth_estimator = pipeline("depth-estimation")
+hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
+
+pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+)
+pipe_prior = pipe_prior.to("cuda")
+
+pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+
+prompt = "A robot, 4k photo"
+negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
+
+generator = torch.Generator(device="cuda").manual_seed(43)
+
+# run prior pipeline
+
+img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)
+negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)
+
+# run controlnet img2img pipeline
+images = pipe(
+ image=img,
+ strength=0.5,
+ image_embeds=img_emb.image_embeds,
+ negative_image_embeds=negative_emb.image_embeds,
+ hint=hint,
+ num_inference_steps=50,
+ generator=generator,
+ height=768,
+ width=768,
+).images
+
+images[0].save("robot_cat.png")
+```
+
+Here is the output. Compared with the output from our text-to-image controlnet example, it keeps a lot more of the cat's facial details from the original image and works them into the robot style we asked for.
+
+
+
+## Optimization
+
+Running Kandinsky 2.2 in inference requires running both a first prior pipeline: [`KandinskyV22PriorPipeline`]
+and a second image decoding pipeline which is one of [`KandinskyV22Pipeline`], [`KandinskyV22Img2ImgPipeline`], or [`KandinskyV22InpaintPipeline`].
+
+The bulk of the computation time will always be spent in the second image decoding pipeline, so that is
+where optimization efforts should be focused.
+
+When running with PyTorch < 2.0, we strongly recommend making use of [`xformers`](https://github.com/facebookresearch/xformers)
+to speed up inference and reduce memory usage. This can be done by simply running:
+
+```py
+from diffusers import DiffusionPipeline
+import torch
+
+t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
+t2i_pipe.enable_xformers_memory_efficient_attention()
+```
+
+When running on PyTorch >= 2.0, PyTorch's SDPA attention will automatically be used. For more information on
+PyTorch's SDPA, feel free to have a look at [this blog post](https://pytorch.org/blog/accelerated-diffusers-pt-20/).
+
+To have explicit control, you can also manually set the pipeline to use PyTorch 2.0's efficient attention:
+
+```py
+from diffusers.models.attention_processor import AttnAddedKVProcessor2_0
+
+t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
+```
+
+The slowest and most memory-intensive attention processor is the default `AttnAddedKVProcessor` processor.
+We do **not** recommend using it except for testing purposes or cases where deterministic behaviour is required.
+You can set it with:
+
+```py
+from diffusers.models.attention_processor import AttnAddedKVProcessor
+
+t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor())
+```
+
+With PyTorch >= 2.0, you can also use Kandinsky with `torch.compile`, which, depending
+on your hardware, can significantly speed up your inference time once the model is compiled.
+To use Kandinsky with `torch.compile`, you can do:
+
+```py
+t2i_pipe.unet.to(memory_format=torch.channels_last)
+t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=True)
+```
+
+After compilation you should see a very fast inference time. For more information,
+feel free to have a look at [Our PyTorch 2.0 benchmark](https://huggingface.co/docs/diffusers/main/en/optimization/torch2.0).
+
+
+
+To generate images directly from a single pipeline, you can use [`KandinskyV22CombinedPipeline`], [`KandinskyV22Img2ImgCombinedPipeline`], and [`KandinskyV22InpaintCombinedPipeline`].
+These combined pipelines wrap the [`KandinskyV22PriorPipeline`] together with the [`KandinskyV22Pipeline`], [`KandinskyV22Img2ImgPipeline`], and [`KandinskyV22InpaintPipeline`] respectively into a single
+pipeline for a simpler user experience.
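+
+A minimal sketch of what this could look like for text-to-image generation, assuming the combined pipeline can be loaded directly from the `kandinsky-community/kandinsky-2-2-decoder` checkpoint through [`AutoPipelineForText2Image`] (which resolves Kandinsky 2.2 checkpoints to [`KandinskyV22CombinedPipeline`]):
+
+```python
+from diffusers import AutoPipelineForText2Image
+import torch
+
+# the combined pipeline chains the prior and the decoder in a single call
+pipe = AutoPipelineForText2Image.from_pretrained(
+    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
+)
+pipe.enable_model_cpu_offload()
+
+prompt = "portrait of a woman, blue eyes, cinematic"
+negative_prompt = "low quality, bad quality"
+
+image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+image.save("portrait.png")
+```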
+
+
+
+## Available Pipelines:
+
+| Pipeline | Tasks |
+|---|---|
+| [pipeline_kandinsky2_2.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py) | *Text-to-Image Generation* |
+| [pipeline_kandinsky2_2_combined.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py) | *End-to-end Text-to-Image, Image-to-Image, and Inpainting Generation* |
+| [pipeline_kandinsky2_2_inpaint.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_controlnet.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py) | *Image-Guided Image Generation* |
+| [pipeline_kandinsky2_2_controlnet_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py) | *Image-Guided Image Generation* |
+
+
+### KandinskyV22Pipeline
+
+[[autodoc]] KandinskyV22Pipeline
+ - all
+ - __call__
+
+### KandinskyV22ControlnetPipeline
+
+[[autodoc]] KandinskyV22ControlnetPipeline
+ - all
+ - __call__
+
+### KandinskyV22ControlnetImg2ImgPipeline
+
+[[autodoc]] KandinskyV22ControlnetImg2ImgPipeline
+ - all
+ - __call__
+
+### KandinskyV22Img2ImgPipeline
+
+[[autodoc]] KandinskyV22Img2ImgPipeline
+ - all
+ - __call__
+
+### KandinskyV22InpaintPipeline
+
+[[autodoc]] KandinskyV22InpaintPipeline
+ - all
+ - __call__
+
+### KandinskyV22PriorPipeline
+
+[[autodoc]] KandinskyV22PriorPipeline
+ - all
+ - __call__
+ - interpolate
+
+### KandinskyV22PriorEmb2EmbPipeline
+
+[[autodoc]] KandinskyV22PriorEmb2EmbPipeline
+ - all
+ - __call__
+ - interpolate
+
+### KandinskyV22CombinedPipeline
+
+[[autodoc]] KandinskyV22CombinedPipeline
+ - all
+ - __call__
+
+### KandinskyV22Img2ImgCombinedPipeline
+
+[[autodoc]] KandinskyV22Img2ImgCombinedPipeline
+ - all
+ - __call__
+
+### KandinskyV22InpaintCombinedPipeline
+
+[[autodoc]] KandinskyV22InpaintCombinedPipeline
+ - all
+ - __call__
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index 149744b01df5..2ccc9a8bc22d 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -141,13 +141,19 @@
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
+ KandinskyCombinedPipeline,
+ KandinskyImg2ImgCombinedPipeline,
KandinskyImg2ImgPipeline,
+ KandinskyInpaintCombinedPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
+ KandinskyV22CombinedPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
+ KandinskyV22Img2ImgCombinedPipeline,
KandinskyV22Img2ImgPipeline,
+ KandinskyV22InpaintCombinedPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 896a9b8c91a7..a22da6373181 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -61,15 +61,21 @@
IFSuperResolutionPipeline,
)
from .kandinsky import (
+ KandinskyCombinedPipeline,
+ KandinskyImg2ImgCombinedPipeline,
KandinskyImg2ImgPipeline,
+ KandinskyInpaintCombinedPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
)
from .kandinsky2_2 import (
+ KandinskyV22CombinedPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
+ KandinskyV22Img2ImgCombinedPipeline,
KandinskyV22Img2ImgPipeline,
+ KandinskyV22InpaintCombinedPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py
index c827231ada7d..66d306720aaf 100644
--- a/src/diffusers/pipelines/auto_pipeline.py
+++ b/src/diffusers/pipelines/auto_pipeline.py
@@ -24,8 +24,22 @@
StableDiffusionXLControlNetPipeline,
)
from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline
-from .kandinsky import KandinskyImg2ImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline
-from .kandinsky2_2 import KandinskyV22Img2ImgPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline
+from .kandinsky import (
+ KandinskyCombinedPipeline,
+ KandinskyImg2ImgCombinedPipeline,
+ KandinskyImg2ImgPipeline,
+ KandinskyInpaintCombinedPipeline,
+ KandinskyInpaintPipeline,
+ KandinskyPipeline,
+)
+from .kandinsky2_2 import (
+ KandinskyV22CombinedPipeline,
+ KandinskyV22Img2ImgCombinedPipeline,
+ KandinskyV22Img2ImgPipeline,
+ KandinskyV22InpaintCombinedPipeline,
+ KandinskyV22InpaintPipeline,
+ KandinskyV22Pipeline,
+)
from .stable_diffusion import (
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
@@ -43,8 +57,8 @@
("stable-diffusion", StableDiffusionPipeline),
("stable-diffusion-xl", StableDiffusionXLPipeline),
("if", IFPipeline),
- ("kandinsky", KandinskyPipeline),
- ("kandinsky22", KandinskyV22Pipeline),
+ ("kandinsky", KandinskyCombinedPipeline),
+ ("kandinsky22", KandinskyV22CombinedPipeline),
("stable-diffusion-controlnet", StableDiffusionControlNetPipeline),
("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline),
]
@@ -55,8 +69,8 @@
("stable-diffusion", StableDiffusionImg2ImgPipeline),
("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline),
("if", IFImg2ImgPipeline),
- ("kandinsky", KandinskyImg2ImgPipeline),
- ("kandinsky22", KandinskyV22Img2ImgPipeline),
+ ("kandinsky", KandinskyImg2ImgCombinedPipeline),
+ ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline),
("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline),
]
)
@@ -66,9 +80,28 @@
("stable-diffusion", StableDiffusionInpaintPipeline),
("stable-diffusion-xl", StableDiffusionXLInpaintPipeline),
("if", IFInpaintingPipeline),
+ ("kandinsky", KandinskyInpaintCombinedPipeline),
+ ("kandinsky22", KandinskyV22InpaintCombinedPipeline),
+ ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline),
+ ]
+)
+
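+# decoder-only pipeline mappings: used by `_get_connected_pipeline` to map a decoder pipeline
+# to the combined pipeline that wraps it together with its prior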
+_AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict(
+ [
+ ("kandinsky", KandinskyPipeline),
+ ("kandinsky22", KandinskyV22Pipeline),
+ ]
+)
+_AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict(
+ [
+ ("kandinsky", KandinskyImg2ImgPipeline),
+ ("kandinsky22", KandinskyV22Img2ImgPipeline),
+ ]
+)
+_AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict(
+ [
("kandinsky", KandinskyInpaintPipeline),
("kandinsky22", KandinskyV22InpaintPipeline),
- ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline),
]
)
@@ -76,10 +109,27 @@
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
+ _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING,
+ _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING,
+ _AUTO_INPAINT_DECODER_PIPELINES_MAPPING,
]
-def _get_task_class(mapping, pipeline_class_name):
+def _get_connected_pipeline(pipeline_cls):
+ # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder
+ if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values():
+ return _get_task_class(
+ AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False
+ )
+ if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values():
+ return _get_task_class(
+ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False
+ )
+ if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values():
+ return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False)
+
+
+def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True):
def get_model(pipeline_class_name):
for task_mapping in SUPPORTED_TASKS_MAPPINGS:
for model_name, pipeline in task_mapping.items():
@@ -92,7 +142,9 @@ def get_model(pipeline_class_name):
task_class = mapping.get(model_name, None)
if task_class is not None:
return task_class
- raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}")
+
+ if throw_error_if_not_exist:
+ raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}")
def _get_signature_keys(obj):
@@ -336,7 +388,7 @@ def from_pipe(cls, pipeline, **kwargs):
if len(missing_modules) > 0:
raise ValueError(
- f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed"
+ f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
)
model = text_2_image_cls(**text_2_image_kwargs)
@@ -581,7 +633,7 @@ def from_pipe(cls, pipeline, **kwargs):
if len(missing_modules) > 0:
raise ValueError(
- f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed"
+ f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
)
model = image_2_image_cls(**image_2_image_kwargs)
@@ -824,7 +876,7 @@ def from_pipe(cls, pipeline, **kwargs):
if len(missing_modules) > 0:
raise ValueError(
- f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(passed_class_obj.keys()) + set(original_class_obj.keys())} were passed"
+ f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
)
model = inpainting_cls(**inpainting_kwargs)
diff --git a/src/diffusers/pipelines/kandinsky/__init__.py b/src/diffusers/pipelines/kandinsky/__init__.py
index 09bb84e7fd8b..946d31649018 100644
--- a/src/diffusers/pipelines/kandinsky/__init__.py
+++ b/src/diffusers/pipelines/kandinsky/__init__.py
@@ -12,6 +12,11 @@
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_kandinsky import KandinskyPipeline
+ from .pipeline_kandinsky_combined import (
+ KandinskyCombinedPipeline,
+ KandinskyImg2ImgCombinedPipeline,
+ KandinskyInpaintCombinedPipeline,
+ )
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py
index 8e42119191b8..89afa0060ef8 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import torch
from transformers import (
@@ -269,6 +269,8 @@ def __call__(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -309,6 +311,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -397,6 +405,10 @@ def __call__(
latents,
generator=generator,
).prev_sample
+
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
new file mode 100644
index 000000000000..c7f439fbabb6
--- /dev/null
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
@@ -0,0 +1,791 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Callable, List, Optional, Union
+
+import PIL
+import torch
+from transformers import (
+ CLIPImageProcessor,
+ CLIPTextModelWithProjection,
+ CLIPTokenizer,
+ CLIPVisionModelWithProjection,
+ XLMRobertaTokenizer,
+)
+
+from ...models import PriorTransformer, UNet2DConditionModel, VQModel
+from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler
+from ...utils import (
+ replace_example_docstring,
+)
+from ..pipeline_utils import DiffusionPipeline
+from .pipeline_kandinsky import KandinskyPipeline
+from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
+from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
+from .pipeline_kandinsky_prior import KandinskyPriorPipeline
+from .text_encoder import MultilingualCLIP
+
+
+TEXT2IMAGE_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForText2Image
+ import torch
+
+ pipe = AutoPipelineForText2Image.from_pretrained(
+ "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
+
+ image = pipe(prompt=prompt, num_inference_steps=25).images[0]
+ ```
+"""
+
+IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForImage2Image
+ import torch
+ import requests
+ from io import BytesIO
+ from PIL import Image
+ import os
+
+ pipe = AutoPipelineForImage2Image.from_pretrained(
+ "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A fantasy landscape, Cinematic lighting"
+ negative_prompt = "low quality, bad quality"
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+ original_image = Image.open(BytesIO(response.content)).convert("RGB")
+ original_image.thumbnail((768, 768))
+
+ image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
+ ```
+"""
+
+INPAINT_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForInpainting
+ from diffusers.utils import load_image
+ import torch
+ import numpy as np
+
+ pipe = AutoPipelineForInpainting.from_pretrained(
+ "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A fantasy landscape, Cinematic lighting"
+ negative_prompt = "low quality, bad quality"
+
+ original_image = load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
+ )
+
+ mask = np.zeros((768, 768), dtype=np.float32)
+ # Let's mask out an area above the cat's head
+ mask[:250, 250:-250] = 1
+
+ image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
+ ```
+"""
+
+
+class KandinskyCombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for text-to-image generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ text_encoder ([`MultilingualCLIP`]):
+ Frozen text-encoder.
+ tokenizer ([`XLMRobertaTokenizer`]):
+ Tokenizer of class
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+ The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ prior_scheduler ([`UnCLIPScheduler`]):
+ A scheduler to be used in combination with `prior` to generate image embedding.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ text_encoder: MultilingualCLIP,
+ tokenizer: XLMRobertaTokenizer,
+ unet: UNet2DConditionModel,
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyPriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyPipeline(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+ def progress_bar(self, iterable=None, total=None):
+ self.prior_pipe.progress_bar(iterable=iterable, total=total)
+ self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ num_images_per_prompt: int = 1,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+ prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ prior_num_inference_steps (`int`, *optional*, defaults to 25):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+ (`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+
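+ # the prior returns one embedding per requested image (prompts * num_images_per_prompt);
+ # tile the prompt list so the decoder receives a batch that matches the embedding batch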
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ outputs = self.decoder_pipe(
+ prompt=prompt,
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ width=width,
+ height=height,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
+
+
+class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for image-to-image generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ text_encoder ([`MultilingualCLIP`]):
+ Frozen text-encoder.
+ tokenizer ([`XLMRobertaTokenizer`]):
+ Tokenizer of class
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+ The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ prior_scheduler ([`UnCLIPScheduler`]):
+ A scheduler to be used in combination with `prior` to generate image embedding.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ text_encoder: MultilingualCLIP,
+ tokenizer: XLMRobertaTokenizer,
+ unet: UNet2DConditionModel,
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyPriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyImg2ImgPipeline(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+ def progress_bar(self, iterable=None, total=None):
+ self.prior_pipe.progress_bar(iterable=iterable, total=total)
+ self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ num_images_per_prompt: int = 1,
+ strength: float = 0.3,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+ process. Can also accept image latents as `image`; if passing latents directly, they will not be encoded
+ again.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+ strength (`float`, *optional*, defaults to 0.3):
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
+ be maximum and the denoising process will run for the full number of iterations specified in
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
+ prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ prior_num_inference_steps (`int`, *optional*, defaults to 25):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+        image = [image] if isinstance(image, PIL.Image.Image) else image
+
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ if (
+ isinstance(image, (list, tuple))
+ and len(image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(image) == 0
+ ):
+ image = (image_embeds.shape[0] // len(image)) * image
+
+ outputs = self.decoder_pipe(
+ prompt=prompt,
+ image=image,
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ strength=strength,
+ width=width,
+ height=height,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
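
A minimal usage sketch for the combined image-to-image pipeline defined above, assuming the `kandinsky-community/kandinsky-2-1` checkpoint resolves to it through `AutoPipelineForImage2Image`:

```python
from io import BytesIO

import requests
import torch
from PIL import Image

from diffusers import AutoPipelineForImage2Image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
original_image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")
original_image.thumbnail((768, 768))

# strength controls how strongly the reference image is transformed (1.0 effectively ignores it)
image = pipe(
    prompt="A fantasy landscape, Cinematic lighting",
    negative_prompt="low quality, bad quality",
    image=original_image,
    strength=0.3,
    num_inference_steps=25,
).images[0]
image.save("fantasy_landscape.png")
```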
+
+
+class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ text_encoder ([`MultilingualCLIP`]):
+ Frozen text-encoder.
+ tokenizer ([`XLMRobertaTokenizer`]):
+            Tokenizer of class [`XLMRobertaTokenizer`].
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior_scheduler ([`UnCLIPScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
+        prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to preprocess images for the CLIP image encoder.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ text_encoder: MultilingualCLIP,
+ tokenizer: XLMRobertaTokenizer,
+ unet: UNet2DConditionModel,
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyPriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyInpaintPipeline(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+    def progress_bar(self, iterable=None, total=None):
+        self.prior_pipe.progress_bar(iterable=iterable, total=total)
+        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ num_images_per_prompt: int = 1,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
+                encoded again.
+ mask_image (`np.array`):
+ Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
+ black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
+ channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
+ so the expected shape would be `(B, H, W, 1)`.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+        image = [image] if isinstance(image, PIL.Image.Image) else image
+ mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
+
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ if (
+ isinstance(image, (list, tuple))
+ and len(image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(image) == 0
+ ):
+ image = (image_embeds.shape[0] // len(image)) * image
+
+ if (
+ isinstance(mask_image, (list, tuple))
+ and len(mask_image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(mask_image) == 0
+ ):
+ mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
+
+ outputs = self.decoder_pipe(
+ prompt=prompt,
+ image=image,
+ mask_image=mask_image,
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ width=width,
+ height=height,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
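
A minimal sketch of the combined inpainting flow under the new mask convention (white pixels are repainted, black pixels preserved), assuming the `kandinsky-community/kandinsky-2-1-inpaint` checkpoint resolves to this combined pipeline through `AutoPipelineForInpainting`:

```python
import numpy as np
import torch

from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image

pipe = AutoPipelineForInpainting.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

original_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)

# White (1) pixels are repainted, black (0) pixels are preserved
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1

image = pipe(
    prompt="a hat", image=original_image, mask_image=mask, num_inference_steps=25
).images[0]
```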
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py
index 14740f7e6afe..5673d306aa0c 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import numpy as np
import PIL
@@ -332,6 +332,8 @@ def __call__(
num_images_per_prompt: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -377,6 +379,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -491,6 +499,9 @@ def __call__(
generator=generator,
).prev_sample
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# 7. post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
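
A minimal sketch of wiring up the `callback` and `callback_steps` arguments added in this hunk (checkpoint names are assumed; any callable with the documented signature works):

```python
import torch

from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "A red cartoon cat, 4k photo"
image_embeds, negative_image_embeds = pipe_prior(prompt).to_tuple()
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)


def log_progress(step: int, timestep: int, latents: torch.FloatTensor):
    # invoked every `callback_steps` denoising steps with the current latents
    print(f"step={step} timestep={timestep} latents_shape={tuple(latents.shape)}")


image = pipe(
    prompt,
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    callback=log_progress,
    callback_steps=10,
).images[0]
```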
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py
index 2b525d4ecba0..dda0c3faa7fd 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py
@@ -13,17 +13,19 @@
# limitations under the License.
from copy import deepcopy
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
import torch.nn.functional as F
+from packaging import version
from PIL import Image
from transformers import (
XLMRobertaTokenizer,
)
+from ... import __version__
from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import (
@@ -65,8 +67,8 @@
... "/kandinsky/cat.png"
... )
- >>> mask = np.ones((768, 768), dtype=np.float32)
- >>> mask[:250, 250:-250] = 0
+ >>> mask = np.zeros((768, 768), dtype=np.float32)
+ >>> mask[:250, 250:-250] = 1
>>> out = pipe(
... prompt,
@@ -232,6 +234,8 @@ def prepare_mask_and_masked_image(image, mask, height, width):
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
+ mask = 1 - mask
+
return mask, image
@@ -273,6 +277,7 @@ def __init__(
scheduler=scheduler,
)
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
+ self._warn_has_been_called = False
# Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
@@ -431,6 +436,8 @@ def __call__(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -482,6 +489,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -490,6 +503,18 @@ def __call__(
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
+ if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse(
+ "0.22.0.dev0"
+ ):
+ logger.warn(
+ "Please note that the expected format of `mask_image` has recently been changed. "
+ "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. "
+ "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. "
+ "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. "
+ "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. "
+ "This warning will be surpressed after the first inference call and will be removed in diffusers>0.22.0"
+ )
+ self._warn_has_been_called = True
# Define call parameters
if isinstance(prompt, str):
@@ -609,6 +634,9 @@ def __call__(
generator=generator,
).prev_sample
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
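
Per the new warning above, masks written against the pre-0.19.0 convention have to be inverted before being passed to the pipeline; a minimal sketch:

```python
import numpy as np

# Pre-0.19.0 style mask: 0 marks the region to repaint, 1 marks the region to keep
legacy_mask = np.ones((768, 768), dtype=np.float32)
legacy_mask[:250, 250:-250] = 0

# New convention (aligned with Stable Diffusion): 1 (white) is repainted, 0 (black) is preserved
mask = 1 - legacy_mask
```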
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
index bf75eeacfdf3..57d8c7beb97a 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
@@ -24,6 +24,8 @@
from ...schedulers import UnCLIPScheduler
from ...utils import (
BaseOutput,
+ is_accelerate_available,
+ is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
@@ -179,7 +181,7 @@ def interpolate(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
negative_prior_prompt: Optional[str] = None,
- negative_prompt: Union[str] = "",
+ negative_prompt: str = "",
guidance_scale: float = 4.0,
device=None,
):
@@ -393,6 +395,35 @@ def _encode_prompt(
return prompt_embeds, text_encoder_hidden_states, text_mask
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate import cpu_offload_with_hook
+ else:
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ hook = None
+ for cpu_offloaded_model in [self.text_encoder, self.prior]:
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+
+ # We'll offload the last model manually.
+ self.prior_hook = hook
+
+ _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook)
+
+ self.final_offload_hook = hook
+
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
@@ -525,9 +556,15 @@ def __call__(
# if negative prompt has been defined, we retrieve split the image embedding into two
if negative_prompt is None:
zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)
+
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
else:
image_embeddings, zero_embeds = image_embeddings.chunk(2)
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.prior_hook.offload()
+
if output_type not in ["pt", "np"]:
raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/kandinsky2_2/__init__.py b/src/diffusers/pipelines/kandinsky2_2/__init__.py
index 3d311eef87fc..4997a2e4056b 100644
--- a/src/diffusers/pipelines/kandinsky2_2/__init__.py
+++ b/src/diffusers/pipelines/kandinsky2_2/__init__.py
@@ -12,6 +12,11 @@
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
+ from .pipeline_kandinsky2_2_combined import (
+ KandinskyV22CombinedPipeline,
+ KandinskyV22Img2ImgCombinedPipeline,
+ KandinskyV22InpaintCombinedPipeline,
+ )
from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline
from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline
from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py
index e30df96becb3..ccbdae09dc08 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import torch
@@ -148,11 +148,14 @@ def __call__(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
- Args:
Function invoked when calling the pipeline for generation.
+
+ Args:
image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
The clip image embeddings for text prompt, that will be used to condition the image generation.
negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
@@ -182,6 +185,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -258,9 +267,16 @@ def __call__(
latents,
generator=generator,
)[0]
+
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
new file mode 100644
index 000000000000..977a82fdbc9f
--- /dev/null
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
@@ -0,0 +1,761 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, List, Optional, Union
+
+import PIL
+import torch
+from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection
+
+from ...models import PriorTransformer, UNet2DConditionModel, VQModel
+from ...schedulers import DDPMScheduler, UnCLIPScheduler
+from ...utils import (
+ logging,
+ replace_example_docstring,
+)
+from ..pipeline_utils import DiffusionPipeline
+from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
+from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
+from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
+from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+TEXT2IMAGE_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForText2Image
+ import torch
+
+ pipe = AutoPipelineForText2Image.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
+
+ image = pipe(prompt=prompt, num_inference_steps=25).images[0]
+ ```
+"""
+
+IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForImage2Image
+ import torch
+ import requests
+ from io import BytesIO
+ from PIL import Image
+
+ pipe = AutoPipelineForImage2Image.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A fantasy landscape, Cinematic lighting"
+ negative_prompt = "low quality, bad quality"
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+    original_image = Image.open(BytesIO(response.content)).convert("RGB")
+    original_image.thumbnail((768, 768))
+
+    image = pipe(
+        prompt=prompt, negative_prompt=negative_prompt, image=original_image, num_inference_steps=25
+    ).images[0]
+ ```
+"""
+
+INPAINT_EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ from diffusers import AutoPipelineForInpainting
+ from diffusers.utils import load_image
+ import torch
+ import numpy as np
+
+ pipe = AutoPipelineForInpainting.from_pretrained(
+ "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+
+ prompt = "A fantasy landscape, Cinematic lighting"
+ negative_prompt = "low quality, bad quality"
+
+ original_image = load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
+ )
+
+ mask = np.zeros((768, 768), dtype=np.float32)
+ # Let's mask out an area above the cat's head
+ mask[:250, 250:-250] = 1
+
+    image = pipe(
+        prompt=prompt, negative_prompt=negative_prompt, image=original_image, mask_image=mask, num_inference_steps=25
+    ).images[0]
+ ```
+"""
+
+
+class KandinskyV22CombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for text-to-image generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ prior_scheduler ([`UnCLIPScheduler`]):
+ A scheduler to be used in combination with `prior` to generate image embedding.
+ prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to preprocess images for the CLIP image encoder.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ unet: UNet2DConditionModel,
+ scheduler: DDPMScheduler,
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyV22PriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyV22Pipeline(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+    def progress_bar(self, iterable=None, total=None):
+        self.prior_pipe.progress_bar(iterable=iterable, total=total)
+        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ num_images_per_prompt: int = 1,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ outputs = self.decoder_pipe(
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ width=width,
+ height=height,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
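
A minimal sketch of how the `prior_*` arguments documented above are routed to the prior while the remaining arguments go to the decoder, using the same checkpoint as in the text-to-image example docstring (assuming `AutoPipelineForText2Image` maps it to this combined pipeline):

```python
import torch

from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

image = pipe(
    prompt="A photo of a corgi wearing a top hat",
    negative_prompt="low quality, bad quality",
    prior_guidance_scale=4.0,      # classifier-free guidance for the prior
    prior_num_inference_steps=25,  # denoising steps for the prior
    guidance_scale=4.0,            # classifier-free guidance for the decoder
    num_inference_steps=100,       # denoising steps for the decoder
    height=768,
    width=768,
).images[0]
```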
+
+
+class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for image-to-image generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ prior_scheduler ([`UnCLIPScheduler`]):
+ A scheduler to be used in combination with `prior` to generate image embedding.
+ prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to preprocess images for the CLIP image encoder.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ unet: UNet2DConditionModel,
+ scheduler: DDPMScheduler,
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyV22PriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyV22Img2ImgPipeline(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+    def progress_bar(self, iterable=None, total=None):
+        self.prior_pipe.progress_bar(iterable=iterable, total=total)
+        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ strength: float = 0.3,
+ num_images_per_prompt: int = 1,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
+                encoded again.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+ strength (`float`, *optional*, defaults to 0.3):
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
+ be maximum and the denoising process will run for the full number of iterations specified in
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+        image = [image] if isinstance(image, PIL.Image.Image) else image
+
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ if (
+ isinstance(image, (list, tuple))
+ and len(image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(image) == 0
+ ):
+ image = (image_embeds.shape[0] // len(image)) * image
+
+ outputs = self.decoder_pipe(
+ image=image,
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ width=width,
+ height=height,
+ strength=strength,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
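
To illustrate the `strength` argument documented above, a hedged sketch comparing a light and a heavy transformation of the same input (checkpoint as in the example docstring, image URL assumed reachable):

```python
import torch

from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

original_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((768, 768))

prompt = "A fantasy landscape, Cinematic lighting"

# Low strength keeps the layout of the input; high strength mostly ignores it
subtle = pipe(prompt=prompt, image=original_image, strength=0.3, num_inference_steps=25).images[0]
heavy = pipe(prompt=prompt, image=original_image, strength=0.9, num_inference_steps=25).images[0]
```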
+
+
+class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
+ """
+ Combined Pipeline for inpainting generation using Kandinsky
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
+ A scheduler to be used in combination with `unet` to generate image latents.
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the image embedding.
+ movq ([`VQModel`]):
+ MoVQ Decoder to generate the image from the latents.
+ prior_prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+ prior_image_encoder ([`CLIPVisionModelWithProjection`]):
+ Frozen image-encoder.
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
+ Frozen text-encoder.
+ prior_tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ prior_scheduler ([`UnCLIPScheduler`]):
+ A scheduler to be used in combination with `prior` to generate image embedding.
+ prior_image_processor ([`CLIPImageProcessor`]):
+            An image processor used to preprocess images for the CLIP image encoder.
+ """
+
+ _load_connected_pipes = True
+
+ def __init__(
+ self,
+ unet: UNet2DConditionModel,
+ scheduler: DDPMScheduler,
+ movq: VQModel,
+ prior_prior: PriorTransformer,
+ prior_image_encoder: CLIPVisionModelWithProjection,
+ prior_text_encoder: CLIPTextModelWithProjection,
+ prior_tokenizer: CLIPTokenizer,
+ prior_scheduler: UnCLIPScheduler,
+ prior_image_processor: CLIPImageProcessor,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ prior_prior=prior_prior,
+ prior_image_encoder=prior_image_encoder,
+ prior_text_encoder=prior_text_encoder,
+ prior_tokenizer=prior_tokenizer,
+ prior_scheduler=prior_scheduler,
+ prior_image_processor=prior_image_processor,
+ )
+ self.prior_pipe = KandinskyV22PriorPipeline(
+ prior=prior_prior,
+ image_encoder=prior_image_encoder,
+ text_encoder=prior_text_encoder,
+ tokenizer=prior_tokenizer,
+ scheduler=prior_scheduler,
+ image_processor=prior_image_processor,
+ )
+ self.decoder_pipe = KandinskyV22InpaintPipeline(
+ unet=unet,
+ scheduler=scheduler,
+ movq=movq,
+ )
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ self.prior_pipe.enable_model_cpu_offload()
+ self.decoder_pipe.enable_model_cpu_offload()
+
+    def progress_bar(self, iterable=None, total=None):
+        self.prior_pipe.progress_bar(iterable=iterable, total=total)
+        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
+
+ def set_progress_bar_config(self, **kwargs):
+ self.prior_pipe.set_progress_bar_config(**kwargs)
+ self.decoder_pipe.set_progress_bar_config(**kwargs)
+
+ @torch.no_grad()
+ @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_inference_steps: int = 100,
+ guidance_scale: float = 4.0,
+ num_images_per_prompt: int = 1,
+ height: int = 512,
+ width: int = 512,
+ prior_guidance_scale: float = 4.0,
+ prior_num_inference_steps: int = 25,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ return_dict: bool = True,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
+                encoded again.
+ mask_image (`np.array`):
+ Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
+ black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
+ channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
+ so the expected shape would be `(B, H, W, 1)`.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference, with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
+ """
+ prior_outputs = self.prior_pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ num_inference_steps=prior_num_inference_steps,
+ generator=generator,
+ latents=latents,
+ guidance_scale=prior_guidance_scale,
+ output_type="pt",
+ return_dict=False,
+ )
+ image_embeds = prior_outputs[0]
+ negative_image_embeds = prior_outputs[1]
+
+ prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
+        image = [image] if isinstance(image, PIL.Image.Image) else image
+ mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
+
+ if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
+ prompt = (image_embeds.shape[0] // len(prompt)) * prompt
+
+ if (
+ isinstance(image, (list, tuple))
+ and len(image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(image) == 0
+ ):
+ image = (image_embeds.shape[0] // len(image)) * image
+
+ if (
+ isinstance(mask_image, (list, tuple))
+ and len(mask_image) < image_embeds.shape[0]
+ and image_embeds.shape[0] % len(mask_image) == 0
+ ):
+ mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
+
+ outputs = self.decoder_pipe(
+ image=image,
+ mask_image=mask_image,
+ image_embeds=image_embeds,
+ negative_image_embeds=negative_image_embeds,
+ width=width,
+ height=height,
+ num_inference_steps=num_inference_steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ output_type=output_type,
+ callback=callback,
+ callback_steps=callback_steps,
+ return_dict=return_dict,
+ )
+ return outputs
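
For orientation, a minimal usage sketch of the combined inpainting `__call__` shown above (illustrated with the 2.2 variant, `KandinskyV22InpaintCombinedPipeline`, added by the same PR). The repo id `kandinsky-community/kandinsky-2-2-decoder-inpaint` is used for illustration and a CUDA device is assumed; the callback only logs progress of the decoder loop.

```python
import numpy as np
import torch

from diffusers import KandinskyV22InpaintCombinedPipeline
from diffusers.utils import load_image

pipe = KandinskyV22InpaintCombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)

# New mask convention: white (1) pixels are repainted, black (0) pixels are preserved
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1


def log_progress(step, timestep, latents):
    # Called every `callback_steps` decoder steps with the current latents
    print(f"decoder step {step}, timestep {timestep}, latents {tuple(latents.shape)}")


image = pipe(
    prompt="a hat",
    image=init_image,
    mask_image=mask,
    height=768,
    width=768,
    prior_guidance_scale=4.0,
    prior_num_inference_steps=25,
    num_inference_steps=100,
    guidance_scale=4.0,
    callback=log_progress,
    callback_steps=10,
).images[0]
image.save("cat_with_hat.png")
```

Note that `prior_guidance_scale` and `prior_num_inference_steps` are routed to the prior, while `callback`/`callback_steps` are forwarded only to the decoder pipeline, matching the `__call__` body above.
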
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py
index 8ae251a5676f..22b3eaf0915e 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import torch
@@ -190,6 +190,8 @@ def __call__(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -232,6 +234,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -313,9 +321,16 @@ def __call__(
latents,
generator=generator,
)[0]
+
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py
index 30638a163a8e..53918fede7c2 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import numpy as np
import PIL
@@ -246,6 +246,8 @@ def __call__(
num_images_per_prompt: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -289,6 +291,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -374,9 +382,16 @@ def __call__(
generator=generator,
)[0]
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py
index 423837a40027..dba50312e8d7 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import numpy as np
import PIL
@@ -218,6 +218,8 @@ def __call__(
num_images_per_prompt: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
@@ -259,6 +261,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -338,9 +346,16 @@ def __call__(
generator=generator,
)[0]
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
index 18a5041f937e..2e0a0d833740 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py
@@ -13,14 +13,16 @@
# limitations under the License.
from copy import deepcopy
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
import torch.nn.functional as F
+from packaging import version
from PIL import Image
+from ... import __version__
from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDPMScheduler
from ...utils import (
@@ -61,8 +63,8 @@
... "/kandinsky/cat.png"
... )
- >>> mask = np.ones((768, 768), dtype=np.float32)
- >>> mask[:250, 250:-250] = 0
+ >>> mask = np.zeros((768, 768), dtype=np.float32)
+ >>> mask[:250, 250:-250] = 1
>>> out = pipe(
... image=init_image,
@@ -230,6 +232,8 @@ def prepare_mask_and_masked_image(image, mask, height, width):
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
+ mask = 1 - mask
+
return mask, image
@@ -263,6 +267,7 @@ def __init__(
movq=movq,
)
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
+ self._warn_has_been_called = False
# Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
@@ -318,19 +323,22 @@ def __call__(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
return_dict: bool = True,
):
"""
- Args:
Function invoked when calling the pipeline for generation.
+
+ Args:
image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
The clip image embeddings for text prompt, that will be used to condition the image generation.
image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
mask_image (`np.array`):
- Tensor representing an image batch, to mask `image`. Black pixels in the mask will be repainted, while
- white pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
+ Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
+ black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
so the expected shape would be `(B, H, W, 1)`.
negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
@@ -360,6 +368,12 @@ def __call__(
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
@@ -368,6 +382,19 @@ def __call__(
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
+ if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse(
+ "0.22.0.dev0"
+ ):
+ logger.warn(
+ "Please note that the expected format of `mask_image` has recently been changed. "
+ "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. "
+ "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. "
+ "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. "
+ "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. "
+ "This warning will be suppressed after the first inference call and will be removed in diffusers>0.22.0"
+ )
+ self._warn_has_been_called = True
+
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
@@ -470,10 +497,18 @@ def __call__(
)
latents = init_mask * init_latents_proper + (1 - init_mask) * latents
+
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
# post-processing
latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
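
Because this inverts the meaning of existing masks, callers migrating from the pre-0.19.0 convention can keep identical behavior by flipping their masks once before calling the pipeline. A small sketch of the migration described in the warning above, using only NumPy:

```python
import numpy as np

# Mask written for the old convention: 0 = repaint, 1 = preserve
old_mask = np.ones((768, 768), dtype=np.float32)
old_mask[:250, 250:-250] = 0

# Equivalent mask under the new convention: 1 = repaint, 0 = preserve
new_mask = 1 - old_mask

# The region to be repainted is now marked with ones, the rest with zeros
assert new_mask[:250, 250:-250].min() == 1
assert new_mask[300:, :].max() == 0
```
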
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
index b6ab2ca3fc23..3cf33b563145 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
@@ -7,6 +7,8 @@
from ...models import PriorTransformer
from ...schedulers import UnCLIPScheduler
from ...utils import (
+ is_accelerate_available,
+ is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
@@ -137,7 +139,7 @@ def interpolate(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
negative_prior_prompt: Optional[str] = None,
- negative_prompt: Union[str] = "",
+ negative_prompt: str = "",
guidance_scale: float = 4.0,
device=None,
):
@@ -353,6 +355,35 @@ def _encode_prompt(
return prompt_embeds, text_encoder_hidden_states, text_mask
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `prior`.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate import cpu_offload_with_hook
+ else:
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ hook = None
+ for cpu_offloaded_model in [self.text_encoder, self.prior]:
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+
+ # We'll offload the last model manually.
+ self.prior_hook = hook
+
+ _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook)
+
+ self.final_offload_hook = hook
+
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
@@ -485,9 +516,15 @@ def __call__(
# if negative prompt has been defined, we retrieve split the image embedding into two
if negative_prompt is None:
zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)
+
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
else:
image_embeddings, zero_embeds = image_embeddings.chunk(2)
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.prior_hook.offload()
+
if output_type not in ["pt", "np"]:
raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}")
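
A short sketch of how the new prior offload hooks are intended to be used. It assumes `accelerate>=0.17.0` is installed, a CUDA device is available, and the 2.2 prior weights resolve from `kandinsky-community/kandinsky-2-2-prior` (repo id for illustration):

```python
import torch

from diffusers import KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
# Moves text_encoder -> prior -> image_encoder onto the GPU one at a time and
# registers `prior_hook` / `final_offload_hook`, which the changes above call
# at the end of `__call__` to push the last used module back to CPU.
pipe_prior.enable_model_cpu_offload()

# Output field order assumed from the decoder call above (image_embeds, negative_image_embeds)
image_embeds, negative_image_embeds = pipe_prior("portrait of a fox wearing a scarf, 4k photo").to_tuple()
```
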
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
index 75be6e54c93f..2b8792e09cf0 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
@@ -7,6 +7,8 @@
from ...models import PriorTransformer
from ...schedulers import UnCLIPScheduler
from ...utils import (
+ is_accelerate_available,
+ is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
@@ -162,7 +164,7 @@ def interpolate(
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
negative_prior_prompt: Optional[str] = None,
- negative_prompt: Union[str] = "",
+ negative_prompt: str = "",
guidance_scale: float = 4.0,
device=None,
):
@@ -392,6 +394,35 @@ def _encode_prompt(
return prompt_embeds, text_encoder_hidden_states, text_mask
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `prior`.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate import cpu_offload_with_hook
+ else:
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ hook = None
+ for cpu_offloaded_model in [self.text_encoder, self.prior]:
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+
+ # We'll offload the last model manually.
+ self.prior_hook = hook
+
+ _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook)
+
+ self.final_offload_hook = hook
+
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
@@ -549,8 +580,12 @@ def __call__(
# if negative prompt has been defined, we retrieve split the image embedding into two
if negative_prompt is None:
zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
else:
image_embeddings, zero_embeds = image_embeddings.chunk(2)
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.prior_hook.offload()
if output_type not in ["pt", "np"]:
raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}")
diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py
index 6f5a3be43bf5..d95b3f6c2636 100644
--- a/src/diffusers/pipelines/pipeline_utils.py
+++ b/src/diffusers/pipelines/pipeline_utils.py
@@ -28,7 +28,7 @@
import numpy as np
import PIL
import torch
-from huggingface_hub import hf_hub_download, model_info, snapshot_download
+from huggingface_hub import ModelCard, hf_hub_download, model_info, snapshot_download
from packaging import version
from requests.exceptions import HTTPError
from tqdm.auto import tqdm
@@ -78,6 +78,7 @@
CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
DUMMY_MODULES_FOLDER = "diffusers.utils"
TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"
+CONNECTED_PIPES_KEYS = ["prior"]
logger = logging.get_logger(__name__)
@@ -322,7 +323,9 @@ def get_class_obj_and_candidates(library_name, class_name, importable_classes, p
return class_obj, class_candidates
-def _get_pipeline_class(class_obj, config, custom_pipeline=None, cache_dir=None, revision=None):
+def _get_pipeline_class(
+ class_obj, config, load_connected_pipeline=False, custom_pipeline=None, cache_dir=None, revision=None
+):
if custom_pipeline is not None:
if custom_pipeline.endswith(".py"):
path = Path(custom_pipeline)
@@ -340,7 +343,22 @@ def _get_pipeline_class(class_obj, config, custom_pipeline=None, cache_dir=None,
return class_obj
diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0])
- return getattr(diffusers_module, config["_class_name"])
+ pipeline_cls = getattr(diffusers_module, config["_class_name"])
+
+ if load_connected_pipeline:
+ from .auto_pipeline import _get_connected_pipeline
+
+ connected_pipeline_cls = _get_connected_pipeline(pipeline_cls)
+ if connected_pipeline_cls is not None:
+ logger.info(
+ f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`"
+ )
+ else:
+ logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.")
+
+ pipeline_cls = connected_pipeline_cls or pipeline_cls
+
+ return pipeline_cls
def load_sub_model(
@@ -475,6 +493,7 @@ class DiffusionPipeline(ConfigMixin):
config_name = "model_index.json"
_optional_components = []
_exclude_from_cpu_offload = []
+ _load_connected_pipes = False
def register_modules(self, **kwargs):
# import it here to avoid circular import
@@ -875,6 +894,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
variant = kwargs.pop("variant", None)
use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
+ load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
# 1. Download the checkpoints and configs
# use snapshot download here to get it working from from_pretrained
@@ -893,6 +913,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
custom_pipeline=custom_pipeline,
custom_revision=custom_revision,
variant=variant,
+ load_connected_pipeline=load_connected_pipeline,
**kwargs,
)
else:
@@ -920,7 +941,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
# 3. Load the pipeline class, if using custom module then load it from the hub
# if we load from explicit class, let's use it
pipeline_class = _get_pipeline_class(
- cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision
+ cls,
+ config_dict,
+ load_connected_pipeline=load_connected_pipeline,
+ custom_pipeline=custom_pipeline,
+ cache_dir=cache_dir,
+ revision=custom_revision,
)
# DEPRECATED: To be removed in 1.0.0
@@ -1061,6 +1087,42 @@ def load_module(name, value):
init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
+ if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")):
+ modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
+ connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}
+ load_kwargs = {
+ "cache_dir": cache_dir,
+ "resume_download": resume_download,
+ "force_download": force_download,
+ "proxies": proxies,
+ "local_files_only": local_files_only,
+ "use_auth_token": use_auth_token,
+ "revision": revision,
+ "torch_dtype": torch_dtype,
+ "custom_pipeline": custom_pipeline,
+ "custom_revision": custom_revision,
+ "provider": provider,
+ "sess_options": sess_options,
+ "device_map": device_map,
+ "max_memory": max_memory,
+ "offload_folder": offload_folder,
+ "offload_state_dict": offload_state_dict,
+ "low_cpu_mem_usage": low_cpu_mem_usage,
+ "variant": variant,
+ "use_safetensors": use_safetensors,
+ }
+ connected_pipes = {
+ prefix: DiffusionPipeline.from_pretrained(repo_id, **load_kwargs.copy())
+ for prefix, repo_id in connected_pipes.items()
+ if repo_id is not None
+ }
+
+ for prefix, connected_pipe in connected_pipes.items():
+ # add connected pipes to `init_kwargs` with _, e.g. "prior_text_encoder"
+ init_kwargs.update(
+ {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()}
+ )
+
# 7. Potentially add passed objects if expected
missing_modules = set(expected_modules) - set(init_kwargs.keys())
passed_modules = list(passed_class_obj.keys())
@@ -1231,6 +1293,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
custom_revision = kwargs.pop("custom_revision", None)
variant = kwargs.pop("variant", None)
use_safetensors = kwargs.pop("use_safetensors", None)
+ load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
if use_safetensors and not is_safetensors_available():
raise ValueError(
@@ -1242,7 +1305,6 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
use_safetensors = is_safetensors_available()
allow_pickle = True
- pipeline_is_cached = False
allow_patterns = None
ignore_patterns = None
@@ -1322,7 +1384,12 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
# retrieve passed components that should not be downloaded
pipeline_class = _get_pipeline_class(
- cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision
+ cls,
+ config_dict,
+ load_connected_pipeline=load_connected_pipeline,
+ custom_pipeline=custom_pipeline,
+ cache_dir=cache_dir,
+ revision=custom_revision,
)
expected_components, _ = cls._get_signature_keys(pipeline_class)
passed_components = [k for k in expected_components if k in kwargs]
@@ -1367,6 +1434,10 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
allow_patterns = [
p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components)
]
+
+ if pipeline_class._load_connected_pipes:
+ allow_patterns.append("README.md")
+
# Don't download index files of forbidden patterns either
ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns]
@@ -1390,7 +1461,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
# download all allow_patterns - ignore_patterns
try:
- return snapshot_download(
+ cached_folder = snapshot_download(
pretrained_model_name,
cache_dir=cache_dir,
resume_download=resume_download,
@@ -1402,6 +1473,15 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
ignore_patterns=ignore_patterns,
user_agent=user_agent,
)
+
+ if pipeline_class._load_connected_pipes:
+ modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
+ connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])
+ for connected_pipe_repo_id in connected_pipes:
+ DiffusionPipeline.download(connected_pipe_repo_id)
+
+ return cached_folder
+
except FileNotFoundError:
# Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache.
# This can happen in two cases:
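
Taken together, the `_load_connected_pipes` flag, the README/model-card lookup, and the `prior_*` prefixing above let a decoder repo pull in its prior automatically. A hedged sketch of the intended call site; the repo id is illustrative and assumes its model card declares a `prior` entry:

```python
import torch

from diffusers import DiffusionPipeline

# With `load_connected_pipeline=True`, `_get_pipeline_class` swaps in the connected
# (combined) pipeline class, and the prior's components are registered under
# `prior_`-prefixed names such as `prior_text_encoder`.
pipe = DiffusionPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder",
    load_connected_pipeline=True,
    torch_dtype=torch.float16,
)
print(pipe.__class__.__name__)  # expected: KandinskyV22CombinedPipeline
```
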
diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
index 016760337c69..254b99e85c05 100644
--- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py
+++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
@@ -167,6 +167,36 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class KandinskyCombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
+class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class KandinskyImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -182,6 +212,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class KandinskyInpaintCombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class KandinskyInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -227,6 +272,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class KandinskyV22CombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -257,6 +317,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class KandinskyV22Img2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -272,6 +347,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class KandinskyV22InpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
diff --git a/src/diffusers/utils/torch_utils.py b/src/diffusers/utils/torch_utils.py
index 5f64bce25e78..99ea4d8cf1d0 100644
--- a/src/diffusers/utils/torch_utils.py
+++ b/src/diffusers/utils/torch_utils.py
@@ -64,6 +64,10 @@ def randn_tensor(
elif gen_device_type != device.type and gen_device_type == "cuda":
raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")
+ # make sure generator list of length 1 is treated like a non-list
+ if isinstance(generator, list) and len(generator) == 1:
+ generator = generator[0]
+
if isinstance(generator, list):
shape = (1,) + shape[1:]
latents = [
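
The `randn_tensor` tweak above unwraps a one-element generator list so it behaves exactly like passing the generator itself, meaning both call styles draw the whole batch from a single `torch.randn` call. A quick sanity check, assuming `randn_tensor` is importable from `diffusers.utils` as in the pipelines above:

```python
import torch

from diffusers.utils import randn_tensor

shape = (2, 4, 64, 64)

single = randn_tensor(shape, generator=torch.Generator().manual_seed(0))
listed = randn_tensor(shape, generator=[torch.Generator().manual_seed(0)])

# With the one-element list unwrapped, both paths produce identical latents
assert torch.allclose(single, listed)
```
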
diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py
index 9c0b7c732933..01b8a0f3eec1 100644
--- a/tests/pipelines/kandinsky/test_kandinsky.py
+++ b/tests/pipelines/kandinsky/test_kandinsky.py
@@ -32,30 +32,7 @@
enable_full_determinism()
-class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyPipeline
- params = [
- "prompt",
- "image_embeds",
- "negative_image_embeds",
- ]
- batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -74,7 +51,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_tokenizer(self):
@@ -196,6 +173,39 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyPipeline
+ params = [
+ "prompt",
+ "image_embeds",
+ "negative_image_embeds",
+ ]
+ batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Dummies()
+ return dummy.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummy = Dummies()
+ return dummy.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky(self):
device = "cpu"
@@ -219,9 +229,7 @@ def test_kandinsky(self):
assert image.shape == (1, 64, 64, 3)
- expected_slice = np.array(
- [0.328663, 1.0, 0.23216873, 1.0, 0.92717564, 0.4639046, 0.96894777, 0.31713378, 0.6293953]
- )
+ expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py
new file mode 100644
index 000000000000..21c8e78cfade
--- /dev/null
+++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py
@@ -0,0 +1,335 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
+from diffusers.utils import torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+
+from ..test_pipelines_common import PipelineTesterMixin
+from .test_kandinsky import Dummies
+from .test_kandinsky_img2img import Dummies as Img2ImgDummies
+from .test_kandinsky_inpaint import Dummies as InpaintDummies
+from .test_kandinsky_prior import Dummies as PriorDummies
+
+
+enable_full_determinism()
+
+
+class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyCombinedPipeline
+ params = [
+ "prompt",
+ ]
+ batch_params = ["prompt", "negative_prompt"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Dummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(
+ {
+ "height": 64,
+ "width": 64,
+ }
+ )
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+
+class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyImg2ImgCombinedPipeline
+ params = ["prompt", "image"]
+ batch_params = ["prompt", "negative_prompt", "image"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Img2ImgDummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ dummy = Img2ImgDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
+ inputs.pop("image_embeds")
+ inputs.pop("negative_image_embeds")
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+
+class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyInpaintCombinedPipeline
+ params = ["prompt", "image", "mask_image"]
+ batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = InpaintDummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ dummy = InpaintDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
+ inputs.pop("image_embeds")
+ inputs.pop("negative_image_embeds")
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
index 6b558d4a06b7..d6588bd45dd7 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
@@ -40,32 +40,7 @@
enable_full_determinism()
-class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyImg2ImgPipeline
- params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
- batch_params = [
- "prompt",
- "negative_prompt",
- "image_embeds",
- "negative_image_embeds",
- "image",
- ]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "strength",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -84,7 +59,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_tokenizer(self):
@@ -216,6 +191,41 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyImg2ImgPipeline
+ params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
+ batch_params = [
+ "prompt",
+ "negative_prompt",
+ "image_embeds",
+ "negative_image_embeds",
+ "image",
+ ]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "strength",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_img2img(self):
device = "cpu"
@@ -239,9 +249,7 @@ def test_kandinsky_img2img(self):
assert image.shape == (1, 64, 64, 3)
- expected_slice = np.array(
- [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
- )
+ expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
index b8777bbaf506..7f1841d60807 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -33,33 +33,7 @@
enable_full_determinism()
-class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyInpaintPipeline
- params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
- batch_params = [
- "prompt",
- "negative_prompt",
- "image_embeds",
- "negative_image_embeds",
- "image",
- "mask_image",
- ]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -78,7 +52,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_tokenizer(self):
@@ -189,8 +163,8 @@ def get_dummy_inputs(self, device, seed=0):
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create mask
- mask = np.ones((64, 64), dtype=np.float32)
- mask[:32, :32] = 0
+ mask = np.zeros((64, 64), dtype=np.float32)
+ mask[:32, :32] = 1
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
@@ -211,6 +185,42 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyInpaintPipeline
+ params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
+ batch_params = [
+ "prompt",
+ "negative_prompt",
+ "image_embeds",
+ "negative_image_embeds",
+ "image",
+ "mask_image",
+ ]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_inpaint(self):
device = "cpu"
@@ -232,13 +242,9 @@ def test_kandinsky_inpaint(self):
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
- print(f"image.shape {image.shape}")
-
assert image.shape == (1, 64, 64, 3)
- expected_slice = np.array(
- [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
- )
+ expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@@ -296,8 +302,8 @@ def test_kandinsky_inpaint(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
- mask = np.ones((768, 768), dtype=np.float32)
- mask[:250, 250:-250] = 0
+ mask = np.zeros((768, 768), dtype=np.float32)
+ mask[:250, 250:-250] = 1
prompt = "a hat"
diff --git a/tests/pipelines/kandinsky/test_kandinsky_prior.py b/tests/pipelines/kandinsky/test_kandinsky_prior.py
index d9c260eabc06..7b1acc9fc03e 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_prior.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_prior.py
@@ -37,22 +37,7 @@
enable_full_determinism()
-class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyPriorPipeline
- params = ["prompt"]
- batch_params = ["prompt", "negative_prompt"]
- required_optional_params = [
- "num_images_per_prompt",
- "generator",
- "num_inference_steps",
- "latents",
- "negative_prompt",
- "guidance_scale",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -183,6 +168,31 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyPriorPipeline
+ params = ["prompt"]
+ batch_params = ["prompt", "negative_prompt"]
+ required_optional_params = [
+ "num_images_per_prompt",
+ "generator",
+ "num_inference_steps",
+ "latents",
+ "negative_prompt",
+ "guidance_scale",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Dummies()
+ return dummy.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummy = Dummies()
+ return dummy.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_prior(self):
device = "cpu"
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky.py b/tests/pipelines/kandinsky_v22/test_kandinsky.py
index 162c96d4b3e2..6430a476ab98 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky.py
@@ -30,28 +30,7 @@
enable_full_determinism()
-class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyV22Pipeline
- params = [
- "image_embeds",
- "negative_image_embeds",
- ]
- batch_params = ["image_embeds", "negative_image_embeds"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -70,7 +49,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_unet(self):
@@ -166,6 +145,37 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22Pipeline
+ params = [
+ "image_embeds",
+ "negative_image_embeds",
+ ]
+ batch_params = ["image_embeds", "negative_image_embeds"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
def test_kandinsky(self):
device = "cpu"
@@ -189,9 +199,7 @@ def test_kandinsky(self):
assert image.shape == (1, 64, 64, 3)
- expected_slice = np.array(
- [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
- )
+ expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
new file mode 100644
index 000000000000..666ea30bd0fd
--- /dev/null
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
@@ -0,0 +1,339 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+from diffusers import (
+ KandinskyV22CombinedPipeline,
+ KandinskyV22Img2ImgCombinedPipeline,
+ KandinskyV22InpaintCombinedPipeline,
+)
+from diffusers.utils import torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+
+from ..test_pipelines_common import PipelineTesterMixin
+from .test_kandinsky import Dummies
+from .test_kandinsky_img2img import Dummies as Img2ImgDummies
+from .test_kandinsky_inpaint import Dummies as InpaintDummies
+from .test_kandinsky_prior import Dummies as PriorDummies
+
+
+enable_full_determinism()
+
+
+class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22CombinedPipeline
+ params = [
+ "prompt",
+ ]
+ batch_params = ["prompt", "negative_prompt"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Dummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(
+ {
+ "height": 64,
+ "width": 64,
+ }
+ )
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+
+class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22Img2ImgCombinedPipeline
+ params = ["prompt", "image"]
+ batch_params = ["prompt", "negative_prompt", "image"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = Img2ImgDummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ dummy = Img2ImgDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
+ inputs.pop("image_embeds")
+ inputs.pop("negative_image_embeds")
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+
+class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22InpaintCombinedPipeline
+ params = ["prompt", "image", "mask_image"]
+ batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "negative_prompt",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummy = InpaintDummies()
+ prior_dummy = PriorDummies()
+ components = dummy.get_dummy_components()
+
+ components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ prior_dummy = PriorDummies()
+ dummy = InpaintDummies()
+ inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
+ inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
+ inputs.pop("image_embeds")
+ inputs.pop("negative_image_embeds")
+ return inputs
+
+ def test_kandinsky(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+
+ pipe.set_progress_bar_config(disable=None)
+
+ output = pipe(**self.get_dummy_inputs(device))
+ image = output.images
+
+ image_from_tuple = pipe(
+ **self.get_dummy_inputs(device),
+ return_dict=False,
+ )[0]
+
+ image_slice = image[0, -3:, -3:, -1]
+ image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
+
+ assert image.shape == (1, 64, 64, 3)
+
+ expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816])
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+ assert (
+ np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+ ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+
+ @require_torch_gpu
+ def test_offloads(self):
+ pipes = []
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components).to(torch_device)
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_model_cpu_offload()
+ pipes.append(sd_pipe)
+
+ components = self.get_dummy_components()
+ sd_pipe = self.pipeline_class(**components)
+ sd_pipe.enable_sequential_cpu_offload()
+ pipes.append(sd_pipe)
+
+ image_slices = []
+ for pipe in pipes:
+ inputs = self.get_dummy_inputs(torch_device)
+ image = pipe(**inputs).images
+
+ image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+ assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+ assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
+
+ def test_inference_batch_single_identical(self):
+ super().test_inference_batch_single_identical(expected_max_diff=1e-2)
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
index 069854325fd4..17f27d0d7804 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
@@ -37,29 +37,7 @@
enable_full_determinism()
-class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyV22Img2ImgPipeline
- params = ["image_embeds", "negative_image_embeds", "image"]
- batch_params = [
- "image_embeds",
- "negative_image_embeds",
- "image",
- ]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "strength",
- "guidance_scale",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -78,7 +56,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_unet(self):
@@ -184,6 +162,38 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22Img2ImgPipeline
+ params = ["image_embeds", "negative_image_embeds", "image"]
+ batch_params = [
+ "image_embeds",
+ "negative_image_embeds",
+ "image",
+ ]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "strength",
+ "guidance_scale",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_img2img(self):
device = "cpu"
@@ -207,9 +217,7 @@ def test_kandinsky_img2img(self):
assert image.shape == (1, 64, 64, 3)
- expected_slice = np.array(
- [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
- )
+ expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
index 9be3993acc6f..436c240e1ac8 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
@@ -37,30 +37,7 @@
enable_full_determinism()
-class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyV22InpaintPipeline
- params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
- batch_params = [
- "image_embeds",
- "negative_image_embeds",
- "image",
- "mask_image",
- ]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -79,7 +56,7 @@ def time_embed_dim(self):
@property
def cross_attention_dim(self):
- return 100
+ return 32
@property
def dummy_unet(self):
@@ -165,8 +142,8 @@ def get_dummy_inputs(self, device, seed=0):
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create mask
- mask = np.ones((64, 64), dtype=np.float32)
- mask[:32, :32] = 0
+ mask = np.zeros((64, 64), dtype=np.float32)
+ mask[:32, :32] = 1
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
@@ -186,6 +163,39 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22InpaintPipeline
+ params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
+ batch_params = [
+ "image_embeds",
+ "negative_image_embeds",
+ "image",
+ "mask_image",
+ ]
+ required_optional_params = [
+ "generator",
+ "height",
+ "width",
+ "latents",
+ "guidance_scale",
+ "num_inference_steps",
+ "return_dict",
+ "guidance_scale",
+ "num_images_per_prompt",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_inpaint(self):
device = "cpu"
@@ -207,8 +217,6 @@ def test_kandinsky_inpaint(self):
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
- print(f"image.shape {image.shape}")
-
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
@@ -244,8 +252,8 @@ def test_kandinsky_inpaint(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
- mask = np.ones((768, 768), dtype=np.float32)
- mask[:250, 250:-250] = 0
+ mask = np.zeros((768, 768), dtype=np.float32)
+ mask[:250, 250:-250] = 1
prompt = "a hat"
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py b/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py
index 1b8cefa91f4e..3191f6a11309 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py
@@ -37,22 +37,7 @@
enable_full_determinism()
-class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyV22PriorPipeline
- params = ["prompt"]
- batch_params = ["prompt", "negative_prompt"]
- required_optional_params = [
- "num_images_per_prompt",
- "generator",
- "num_inference_steps",
- "latents",
- "negative_prompt",
- "guidance_scale",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
+class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@@ -183,6 +168,31 @@ def get_dummy_inputs(self, device, seed=0):
}
return inputs
+
+class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = KandinskyV22PriorPipeline
+ params = ["prompt"]
+ batch_params = ["prompt", "negative_prompt"]
+ required_optional_params = [
+ "num_images_per_prompt",
+ "generator",
+ "num_inference_steps",
+ "latents",
+ "negative_prompt",
+ "guidance_scale",
+ "output_type",
+ "return_dict",
+ ]
+ test_xformers_attention = False
+
+ def get_dummy_components(self):
+ dummies = Dummies()
+ return dummies.get_dummy_components()
+
+ def get_dummy_inputs(self, device, seed=0):
+ dummies = Dummies()
+ return dummies.get_dummy_inputs(device=device, seed=seed)
+
def test_kandinsky_prior(self):
device = "cpu"